1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Fast Userspace Mutexes (which I call "Futexes!").
4 * (C) Rusty Russell, IBM 2002
5 *
6 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8 *
9 * Removed page pinning, fix privately mapped COW pages and other cleanups
10 * (C) Copyright 2003, 2004 Jamie Lokier
11 *
12 * Robust futex support started by Ingo Molnar
13 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15 *
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
17 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19 *
20 * PRIVATE futexes by Eric Dumazet
21 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22 *
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24 * Copyright (C) IBM Corporation, 2009
25 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
26 *
27 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28 * enough at me, Linus for the original (flawed) idea, Matthew
29 * Kirkwood for proof-of-concept implementation.
30 *
31 * "The futexes are also cursed."
32 * "But they come in a choice of three flavours!"
33 */
34 #include <linux/compat.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fs.h>
38 #include <linux/file.h>
39 #include <linux/jhash.h>
40 #include <linux/init.h>
41 #include <linux/futex.h>
42 #include <linux/mount.h>
43 #include <linux/pagemap.h>
44 #include <linux/syscalls.h>
45 #include <linux/signal.h>
46 #include <linux/export.h>
47 #include <linux/magic.h>
48 #include <linux/pid.h>
49 #include <linux/nsproxy.h>
50 #include <linux/ptrace.h>
51 #include <linux/sched/rt.h>
52 #include <linux/sched/wake_q.h>
53 #include <linux/sched/mm.h>
54 #include <linux/hugetlb.h>
55 #include <linux/freezer.h>
56 #include <linux/memblock.h>
57 #include <linux/fault-inject.h>
58 #include <linux/refcount.h>
59
60 #include <asm/futex.h>
61
62 #include "locking/rtmutex_common.h"
63 #include <trace/hooks/futex.h>
64
65 /*
66 * READ this before attempting to hack on futexes!
67 *
68 * Basic futex operation and ordering guarantees
69 * =============================================
70 *
71 * The waiter reads the futex value in user space and calls
72 * futex_wait(). This function computes the hash bucket and acquires
73 * the hash bucket lock. After that it reads the futex user space value
74 * again and verifies that the data has not changed. If it has not changed
75 * it enqueues itself into the hash bucket, releases the hash bucket lock
76 * and schedules.
77 *
78 * The waker side modifies the user space value of the futex and calls
79 * futex_wake(). This function computes the hash bucket and acquires the
80 * hash bucket lock. Then it looks for waiters on that futex in the hash
81 * bucket and wakes them.
82 *
83 * In futex wake up scenarios where no tasks are blocked on a futex, taking
84 * the hb spinlock can be avoided and the syscall can simply return. For this
85 * optimization to work, ordering guarantees must exist so that the waiter
86 * being added to the list is acknowledged when the list is concurrently being
87 * checked by the waker, avoiding scenarios like the following:
88 *
89 * CPU 0 CPU 1
90 * val = *futex;
91 * sys_futex(WAIT, futex, val);
92 * futex_wait(futex, val);
93 * uval = *futex;
94 * *futex = newval;
95 * sys_futex(WAKE, futex);
96 * futex_wake(futex);
97 * if (queue_empty())
98 * return;
99 * if (uval == val)
100 * lock(hash_bucket(futex));
101 * queue();
102 * unlock(hash_bucket(futex));
103 * schedule();
104 *
105 * This would cause the waiter on CPU 0 to wait forever because it
106 * missed the transition of the user space value from val to newval
107 * and the waker did not find the waiter in the hash bucket queue.
108 *
109 * The correct serialization ensures that a waiter either observes
110 * the changed user space value before blocking or is woken by a
111 * concurrent waker:
112 *
113 * CPU 0 CPU 1
114 * val = *futex;
115 * sys_futex(WAIT, futex, val);
116 * futex_wait(futex, val);
117 *
118 * waiters++; (a)
119 * smp_mb(); (A) <-- paired with -.
120 * |
121 * lock(hash_bucket(futex)); |
122 * |
123 * uval = *futex; |
124 * | *futex = newval;
125 * | sys_futex(WAKE, futex);
126 * | futex_wake(futex);
127 * |
128 * `--------> smp_mb(); (B)
129 * if (uval == val)
130 * queue();
131 * unlock(hash_bucket(futex));
132 * schedule(); if (waiters)
133 * lock(hash_bucket(futex));
134 * else wake_waiters(futex);
135 * waiters--; (b) unlock(hash_bucket(futex));
136 *
137 * Where (A) orders the waiters increment and the futex value read through
138 * atomic operations (see hb_waiters_inc) and where (B) orders the write
139 * to futex and the waiters read -- this is done by the barriers for both
140 * shared and private futexes in get_futex_key_refs().
141 *
142 * This yields the following case (where X:=waiters, Y:=futex):
143 *
144 * X = Y = 0
145 *
146 * w[X]=1 w[Y]=1
147 * MB MB
148 * r[Y]=y r[X]=x
149 *
150 * Which guarantees that x==0 && y==0 is impossible; which translates back into
151 * the guarantee that we cannot both miss the futex variable change and the
152 * enqueue.
153 *
154 * Note that a new waiter is accounted for in (a) even when it is possible that
155 * the wait call can return error, in which case we backtrack from it in (b).
156 * Refer to the comment in queue_lock().
157 *
158 * Similarly, in order to account for waiters being requeued on another
159 * address we always increment the waiters for the destination bucket before
160 * acquiring the lock. We then decrement them again after releasing it -
161 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
162 * will do the additional required waiter count housekeeping. This is done for
163 * double_lock_hb() and double_unlock_hb(), respectively.
164 */
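/*
 * Illustrative sketch (user-space side, not part of this file): the minimal
 * wait/wake protocol that the ordering guarantees above protect. The helper
 * names below are made up for illustration; the raw futex(2) syscall is the
 * real interface.
 *
 *	static int futex_word;
 *
 *	// Waiter: sleep only while futex_word still holds 'val'.
 *	static void wait_while_equal(int val)
 *	{
 *		while (__atomic_load_n(&futex_word, __ATOMIC_ACQUIRE) == val)
 *			syscall(SYS_futex, &futex_word, FUTEX_WAIT_PRIVATE,
 *				val, NULL, NULL, 0);
 *	}
 *
 *	// Waker: publish the new value first, then wake one waiter.
 *	static void store_and_wake(int newval)
 *	{
 *		__atomic_store_n(&futex_word, newval, __ATOMIC_RELEASE);
 *		syscall(SYS_futex, &futex_word, FUTEX_WAKE_PRIVATE,
 *			1, NULL, NULL, 0);
 *	}
 *
 * FUTEX_WAIT fails with EAGAIN/EWOULDBLOCK when *uaddr no longer equals 'val'
 * at call time, which is the user-visible side of the "missed transition"
 * that barriers (A)/(B) above prevent inside the kernel.
 */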
165
166 #ifdef CONFIG_HAVE_FUTEX_CMPXCHG
167 #define futex_cmpxchg_enabled 1
168 #else
169 static int __read_mostly futex_cmpxchg_enabled;
170 #endif
171
172 /*
173 * Futex flags used to encode options to functions and preserve them across
174 * restarts.
175 */
176 #ifdef CONFIG_MMU
177 # define FLAGS_SHARED 0x01
178 #else
179 /*
180 * NOMMU systems have no per-process address space. Let the compiler optimize
181 * code away.
182 */
183 # define FLAGS_SHARED 0x00
184 #endif
185 #define FLAGS_CLOCKRT 0x02
186 #define FLAGS_HAS_TIMEOUT 0x04
187
188 /*
189 * Priority Inheritance state:
190 */
191 struct futex_pi_state {
192 /*
193 * list of 'owned' pi_state instances - these have to be
194 * cleaned up in do_exit() if the task exits prematurely:
195 */
196 struct list_head list;
197
198 /*
199 * The PI object:
200 */
201 struct rt_mutex pi_mutex;
202
203 struct task_struct *owner;
204 refcount_t refcount;
205
206 union futex_key key;
207 } __randomize_layout;
208
209 /**
210 * struct futex_q - The hashed futex queue entry, one per waiting task
211 * @list: priority-sorted list of tasks waiting on this futex
212 * @task: the task waiting on the futex
213 * @lock_ptr: the hash bucket lock
214 * @key: the key the futex is hashed on
215 * @pi_state: optional priority inheritance state
216 * @rt_waiter: rt_waiter storage for use with requeue_pi
217 * @requeue_pi_key: the requeue_pi target futex key
218 * @bitset: bitset for the optional bitmasked wakeup
219 *
220 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
221 * we can wake only the relevant ones (hashed queues may be shared).
222 *
223 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
224 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
225 * The order of wakeup is always to make the first condition true, then
226 * the second.
227 *
228 * PI futexes are typically woken before they are removed from the hash list via
229 * the rt_mutex code. See unqueue_me_pi().
230 */
231 struct futex_q {
232 struct plist_node list;
233
234 struct task_struct *task;
235 spinlock_t *lock_ptr;
236 union futex_key key;
237 struct futex_pi_state *pi_state;
238 struct rt_mutex_waiter *rt_waiter;
239 union futex_key *requeue_pi_key;
240 u32 bitset;
241 } __randomize_layout;
242
243 static const struct futex_q futex_q_init = {
244 /* list gets initialized in queue_me()*/
245 .key = FUTEX_KEY_INIT,
246 .bitset = FUTEX_BITSET_MATCH_ANY
247 };
248
249 /*
250 * Hash buckets are shared by all the futex_keys that hash to the same
251 * location. Each key may have multiple futex_q structures, one for each task
252 * waiting on a futex.
253 */
254 struct futex_hash_bucket {
255 atomic_t waiters;
256 spinlock_t lock;
257 struct plist_head chain;
258 } ____cacheline_aligned_in_smp;
259
260 /*
261 * The base of the bucket array and its size are always used together
262 * (after initialization only in hash_futex()), so ensure that they
263 * reside in the same cacheline.
264 */
265 static struct {
266 struct futex_hash_bucket *queues;
267 unsigned long hashsize;
268 } __futex_data __read_mostly __aligned(2*sizeof(long));
269 #define futex_queues (__futex_data.queues)
270 #define futex_hashsize (__futex_data.hashsize)
271
272
273 /*
274 * Fault injections for futexes.
275 */
276 #ifdef CONFIG_FAIL_FUTEX
277
278 static struct {
279 struct fault_attr attr;
280
281 bool ignore_private;
282 } fail_futex = {
283 .attr = FAULT_ATTR_INITIALIZER,
284 .ignore_private = false,
285 };
286
287 static int __init setup_fail_futex(char *str)
288 {
289 return setup_fault_attr(&fail_futex.attr, str);
290 }
291 __setup("fail_futex=", setup_fail_futex);
292
293 static bool should_fail_futex(bool fshared)
294 {
295 if (fail_futex.ignore_private && !fshared)
296 return false;
297
298 return should_fail(&fail_futex.attr, 1);
299 }
300
301 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
302
303 static int __init fail_futex_debugfs(void)
304 {
305 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
306 struct dentry *dir;
307
308 dir = fault_create_debugfs_attr("fail_futex", NULL,
309 &fail_futex.attr);
310 if (IS_ERR(dir))
311 return PTR_ERR(dir);
312
313 debugfs_create_bool("ignore-private", mode, dir,
314 &fail_futex.ignore_private);
315 return 0;
316 }
317
318 late_initcall(fail_futex_debugfs);
319
320 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
321
322 #else
323 static inline bool should_fail_futex(bool fshared)
324 {
325 return false;
326 }
327 #endif /* CONFIG_FAIL_FUTEX */
328
329 #ifdef CONFIG_COMPAT
330 static void compat_exit_robust_list(struct task_struct *curr);
331 #else
332 static inline void compat_exit_robust_list(struct task_struct *curr) { }
333 #endif
334
335 static inline void futex_get_mm(union futex_key *key)
336 {
337 mmgrab(key->private.mm);
338 /*
339 * Ensure futex_get_mm() implies a full barrier such that
340 * get_futex_key() implies a full barrier. This is relied upon
341 * as smp_mb(); (B), see the ordering comment above.
342 */
343 smp_mb__after_atomic();
344 }
345
346 /*
347 * Reflects a new waiter being added to the waitqueue.
348 */
349 static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
350 {
351 #ifdef CONFIG_SMP
352 atomic_inc(&hb->waiters);
353 /*
354 * Full barrier (A), see the ordering comment above.
355 */
356 smp_mb__after_atomic();
357 #endif
358 }
359
360 /*
361 * Reflects a waiter being removed from the waitqueue by wakeup
362 * paths.
363 */
364 static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
365 {
366 #ifdef CONFIG_SMP
367 atomic_dec(&hb->waiters);
368 #endif
369 }
370
371 static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
372 {
373 #ifdef CONFIG_SMP
374 return atomic_read(&hb->waiters);
375 #else
376 return 1;
377 #endif
378 }
379
380 /**
381 * hash_futex - Return the hash bucket in the global hash
382 * @key: Pointer to the futex key for which the hash is calculated
383 *
384 * We hash on the keys returned from get_futex_key (see below) and return the
385 * corresponding hash bucket in the global hash.
386 */
387 static struct futex_hash_bucket *hash_futex(union futex_key *key)
388 {
389 u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
390 key->both.offset);
391
392 return &futex_queues[hash & (futex_hashsize - 1)];
393 }
394
395
396 /**
397 * match_futex - Check whether two futex keys are equal
398 * @key1: Pointer to key1
399 * @key2: Pointer to key2
400 *
401 * Return 1 if two futex_keys are equal, 0 otherwise.
402 */
403 static inline int match_futex(union futex_key *key1, union futex_key *key2)
404 {
405 return (key1 && key2
406 && key1->both.word == key2->both.word
407 && key1->both.ptr == key2->both.ptr
408 && key1->both.offset == key2->both.offset);
409 }
410
411 /*
412 * Take a reference to the resource addressed by a key.
413 * Can be called while holding spinlocks.
414 *
415 */
416 static void get_futex_key_refs(union futex_key *key)
417 {
418 if (!key->both.ptr)
419 return;
420
421 /*
422 * On MMU-less systems futexes are always "private" as there is no per
423 * process address space. We need the barrier nevertheless - yes,
424 * arch/blackfin has MMU-less SMP ...
425 */
426 if (!IS_ENABLED(CONFIG_MMU)) {
427 smp_mb(); /* explicit smp_mb(); (B) */
428 return;
429 }
430
431 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
432 case FUT_OFF_INODE:
433 smp_mb(); /* explicit smp_mb(); (B) */
434 break;
435 case FUT_OFF_MMSHARED:
436 futex_get_mm(key); /* implies smp_mb(); (B) */
437 break;
438 default:
439 /*
440 * Private futexes do not hold reference on an inode or
441 * mm, therefore the only purpose of calling get_futex_key_refs
442 * is because we need the barrier for the lockless waiter check.
443 */
444 smp_mb(); /* explicit smp_mb(); (B) */
445 }
446 }
447
448 /*
449 * Drop a reference to the resource addressed by a key.
450 * The hash bucket spinlock must not be held. This is
451 * a no-op for private futexes, see comment in the get
452 * counterpart.
453 */
454 static void drop_futex_key_refs(union futex_key *key)
455 {
456 if (!key->both.ptr) {
457 /* If we're here then we tried to put a key we failed to get */
458 WARN_ON_ONCE(1);
459 return;
460 }
461
462 if (!IS_ENABLED(CONFIG_MMU))
463 return;
464
465 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
466 case FUT_OFF_INODE:
467 break;
468 case FUT_OFF_MMSHARED:
469 mmdrop(key->private.mm);
470 break;
471 }
472 }
473
474 enum futex_access {
475 FUTEX_READ,
476 FUTEX_WRITE
477 };
478
479 /**
480 * futex_setup_timer - set up the sleeping hrtimer.
481 * @time: ptr to the given timeout value
482 * @timeout: the hrtimer_sleeper structure to be set up
483 * @flags: futex flags
484 * @range_ns: optional range in ns
485 *
486 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
487 * value given
488 */
489 static inline struct hrtimer_sleeper *
490 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
491 int flags, u64 range_ns)
492 {
493 if (!time)
494 return NULL;
495
496 hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
497 CLOCK_REALTIME : CLOCK_MONOTONIC,
498 HRTIMER_MODE_ABS);
499 /*
500 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
501 * effectively the same as calling hrtimer_set_expires().
502 */
503 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
504
505 return timeout;
506 }
507
508 /*
509 * Generate a machine wide unique identifier for this inode.
510 *
511 * This relies on u64 not wrapping in the life-time of the machine; which with
512 * 1ns resolution means almost 585 years.
513 *
514 * This further relies on the fact that a well formed program will not unmap
515 * the file while it has a (shared) futex waiting on it. This mapping will have
516 * a file reference which pins the mount and inode.
517 *
518 * If for some reason an inode gets evicted and read back in again, it will get
519 * a new sequence number and will _NOT_ match, even though it is the exact same
520 * file.
521 *
522 * It is important that match_futex() will never have a false-positive, esp.
523 * for PI futexes that can mess up the state. The above argues that false-negatives
524 * are only possible for malformed programs.
525 */
526 static u64 get_inode_sequence_number(struct inode *inode)
527 {
528 static atomic64_t i_seq;
529 u64 old;
530
531 /* Does the inode already have a sequence number? */
532 old = atomic64_read(&inode->i_sequence);
533 if (likely(old))
534 return old;
535
536 for (;;) {
537 u64 new = atomic64_add_return(1, &i_seq);
538 if (WARN_ON_ONCE(!new))
539 continue;
540
541 old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
542 if (old)
543 return old;
544 return new;
545 }
546 }
547
548 /**
549 * get_futex_key() - Get parameters which are the keys for a futex
550 * @uaddr: virtual address of the futex
551 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
552 * @key: address where result is stored.
553 * @rw: mapping needs to be read/write (values: FUTEX_READ,
554 * FUTEX_WRITE)
555 *
556 * Return: a negative error code or 0
557 *
558 * The key words are stored in @key on success.
559 *
560 * For shared mappings (when @fshared), the key is:
561 * ( inode->i_sequence, page->index, offset_within_page )
562 * [ also see get_inode_sequence_number() ]
563 *
564 * For private mappings (or when !@fshared), the key is:
565 * ( current->mm, address, 0 )
566 *
567 * This allows (cross process, where applicable) identification of the futex
568 * without keeping the page pinned for the duration of the FUTEX_WAIT.
569 *
570 * lock_page() might sleep, the caller should not hold a spinlock.
571 */
572 static int
573 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
574 {
575 unsigned long address = (unsigned long)uaddr;
576 struct mm_struct *mm = current->mm;
577 struct page *page, *tail;
578 struct address_space *mapping;
579 int err, ro = 0;
580
581 /*
582 * The futex address must be "naturally" aligned.
583 */
584 key->both.offset = address % PAGE_SIZE;
585 if (unlikely((address % sizeof(u32)) != 0))
586 return -EINVAL;
587 address -= key->both.offset;
588
589 if (unlikely(!access_ok(uaddr, sizeof(u32))))
590 return -EFAULT;
591
592 if (unlikely(should_fail_futex(fshared)))
593 return -EFAULT;
594
595 /*
596 * PROCESS_PRIVATE futexes are fast.
597 * As the mm cannot disappear under us and the 'key' only needs
598 * virtual address, we don't even have to find the underlying vma.
599 * Note: We do have to check that 'uaddr' is a valid user address,
600 * but access_ok() should be faster than find_vma()
601 */
602 if (!fshared) {
603 key->private.mm = mm;
604 key->private.address = address;
605 get_futex_key_refs(key); /* implies smp_mb(); (B) */
606 return 0;
607 }
608
609 again:
610 /* Ignore any VERIFY_READ mapping (futex common case) */
611 if (unlikely(should_fail_futex(fshared)))
612 return -EFAULT;
613
614 err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
615 /*
616 * If write access is not required (eg. FUTEX_WAIT), try
617 * and get read-only access.
618 */
619 if (err == -EFAULT && rw == FUTEX_READ) {
620 err = get_user_pages_fast(address, 1, 0, &page);
621 ro = 1;
622 }
623 if (err < 0)
624 return err;
625 else
626 err = 0;
627
628 /*
629 * The treatment of mapping from this point on is critical. The page
630 * lock protects many things but in this context the page lock
631 * stabilizes mapping, prevents inode freeing in the shared
632 * file-backed region case and guards against movement to swap cache.
633 *
634 * Strictly speaking the page lock is not needed in all cases being
635 * considered here and the page lock forces unnecessary serialization.
636 * From this point on, mapping will be re-verified if necessary and
637 * page lock will be acquired only if it is unavoidable.
638 *
639 * Mapping checks require the head page for any compound page so the
640 * head page and mapping are looked up now. For anonymous pages, it
641 * does not matter if the page splits in the future as the key is
642 * based on the address. For filesystem-backed pages, the tail is
643 * required as the index of the page determines the key. For
644 * base pages, there is no tail page and tail == page.
645 */
646 tail = page;
647 page = compound_head(page);
648 mapping = READ_ONCE(page->mapping);
649
650 /*
651 * If page->mapping is NULL, then it cannot be a PageAnon
652 * page; but it might be the ZERO_PAGE or in the gate area or
653 * in a special mapping (all cases which we are happy to fail);
654 * or it may have been a good file page when get_user_pages_fast
655 * found it, but truncated or holepunched or subjected to
656 * invalidate_complete_page2 before we got the page lock (also
657 * cases which we are happy to fail). And we hold a reference,
658 * so refcount care in invalidate_complete_page's remove_mapping
659 * prevents drop_caches from setting mapping to NULL beneath us.
660 *
661 * The case we do have to guard against is when memory pressure made
662 * shmem_writepage move it from filecache to swapcache beneath us:
663 * an unlikely race, but we do need to retry for page->mapping.
664 */
665 if (unlikely(!mapping)) {
666 int shmem_swizzled;
667
668 /*
669 * Page lock is required to identify which special case above
670 * applies. If this is really a shmem page then the page lock
671 * will prevent unexpected transitions.
672 */
673 lock_page(page);
674 shmem_swizzled = PageSwapCache(page) || page->mapping;
675 unlock_page(page);
676 put_page(page);
677
678 if (shmem_swizzled)
679 goto again;
680
681 return -EFAULT;
682 }
683
684 /*
685 * Private mappings are handled in a simple way.
686 *
687 * If the futex key is stored on an anonymous page, then the associated
688 * object is the mm which is implicitly pinned by the calling process.
689 *
690 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
691 * it's a read-only handle, it's expected that futexes attach to
692 * the object not the particular process.
693 */
694 if (PageAnon(page)) {
695 /*
696 * A RO anonymous page will never change and thus doesn't make
697 * sense for futex operations.
698 */
699 if (unlikely(should_fail_futex(fshared)) || ro) {
700 err = -EFAULT;
701 goto out;
702 }
703
704 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
705 key->private.mm = mm;
706 key->private.address = address;
707
708 } else {
709 struct inode *inode;
710
711 /*
712 * The associated futex object in this case is the inode and
713 * the page->mapping must be traversed. Ordinarily this should
714 * be stabilised under page lock but it's not strictly
715 * necessary in this case as we just want to pin the inode, not
716 * update the radix tree or anything like that.
717 *
718 * The RCU read lock is taken as the inode is finally freed
719 * under RCU. If the mapping still matches expectations then the
720 * mapping->host can be safely accessed as being a valid inode.
721 */
722 rcu_read_lock();
723
724 if (READ_ONCE(page->mapping) != mapping) {
725 rcu_read_unlock();
726 put_page(page);
727
728 goto again;
729 }
730
731 inode = READ_ONCE(mapping->host);
732 if (!inode) {
733 rcu_read_unlock();
734 put_page(page);
735
736 goto again;
737 }
738
739 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
740 key->shared.i_seq = get_inode_sequence_number(inode);
741 key->shared.pgoff = page_to_pgoff(tail);
742 rcu_read_unlock();
743 }
744
745 get_futex_key_refs(key); /* implies smp_mb(); (B) */
746
747 out:
748 put_page(page);
749 return err;
750 }
751
752 static inline void put_futex_key(union futex_key *key)
753 {
754 drop_futex_key_refs(key);
755 }
756
757 /**
758 * fault_in_user_writeable() - Fault in user address and verify RW access
759 * @uaddr: pointer to faulting user space address
760 *
761 * Slow path to fixup the fault we just took in the atomic write
762 * access to @uaddr.
763 *
764 * We have no generic implementation of a non-destructive write to the
765 * user address. We know that we faulted in the atomic pagefault
766 * disabled section so we can as well avoid the #PF overhead by
767 * calling get_user_pages() right away.
768 */
769 static int fault_in_user_writeable(u32 __user *uaddr)
770 {
771 struct mm_struct *mm = current->mm;
772 int ret;
773
774 down_read(&mm->mmap_sem);
775 ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
776 FAULT_FLAG_WRITE, NULL);
777 up_read(&mm->mmap_sem);
778
779 return ret < 0 ? ret : 0;
780 }
781
782 /**
783 * futex_top_waiter() - Return the highest priority waiter on a futex
784 * @hb: the hash bucket the futex_q's reside in
785 * @key: the futex key (to distinguish it from other futex futex_q's)
786 *
787 * Must be called with the hb lock held.
788 */
789 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
790 union futex_key *key)
791 {
792 struct futex_q *this;
793
794 plist_for_each_entry(this, &hb->chain, list) {
795 if (match_futex(&this->key, key))
796 return this;
797 }
798 return NULL;
799 }
800
801 static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
802 u32 uval, u32 newval)
803 {
804 int ret;
805
806 pagefault_disable();
807 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
808 pagefault_enable();
809
810 return ret;
811 }
812
813 static int get_futex_value_locked(u32 *dest, u32 __user *from)
814 {
815 int ret;
816
817 pagefault_disable();
818 ret = __get_user(*dest, from);
819 pagefault_enable();
820
821 return ret ? -EFAULT : 0;
822 }
823
824
825 /*
826 * PI code:
827 */
828 static int refill_pi_state_cache(void)
829 {
830 struct futex_pi_state *pi_state;
831
832 if (likely(current->pi_state_cache))
833 return 0;
834
835 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
836
837 if (!pi_state)
838 return -ENOMEM;
839
840 INIT_LIST_HEAD(&pi_state->list);
841 /* pi_mutex gets initialized later */
842 pi_state->owner = NULL;
843 refcount_set(&pi_state->refcount, 1);
844 pi_state->key = FUTEX_KEY_INIT;
845
846 current->pi_state_cache = pi_state;
847
848 return 0;
849 }
850
851 static struct futex_pi_state *alloc_pi_state(void)
852 {
853 struct futex_pi_state *pi_state = current->pi_state_cache;
854
855 WARN_ON(!pi_state);
856 current->pi_state_cache = NULL;
857
858 return pi_state;
859 }
860
861 static void pi_state_update_owner(struct futex_pi_state *pi_state,
862 struct task_struct *new_owner)
863 {
864 struct task_struct *old_owner = pi_state->owner;
865
866 lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
867
868 if (old_owner) {
869 raw_spin_lock(&old_owner->pi_lock);
870 WARN_ON(list_empty(&pi_state->list));
871 list_del_init(&pi_state->list);
872 raw_spin_unlock(&old_owner->pi_lock);
873 }
874
875 if (new_owner) {
876 raw_spin_lock(&new_owner->pi_lock);
877 WARN_ON(!list_empty(&pi_state->list));
878 list_add(&pi_state->list, &new_owner->pi_state_list);
879 pi_state->owner = new_owner;
880 raw_spin_unlock(&new_owner->pi_lock);
881 }
882 }
883
884 static void get_pi_state(struct futex_pi_state *pi_state)
885 {
886 WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
887 }
888
889 /*
890 * Drops a reference to the pi_state object and frees or caches it
891 * when the last reference is gone.
892 */
893 static void put_pi_state(struct futex_pi_state *pi_state)
894 {
895 if (!pi_state)
896 return;
897
898 if (!refcount_dec_and_test(&pi_state->refcount))
899 return;
900
901 /*
902 * If pi_state->owner is NULL, the owner is most probably dying
903 * and has cleaned up the pi_state already
904 */
905 if (pi_state->owner) {
906 unsigned long flags;
907
908 raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
909 pi_state_update_owner(pi_state, NULL);
910 rt_mutex_proxy_unlock(&pi_state->pi_mutex);
911 raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
912 }
913
914 if (current->pi_state_cache) {
915 kfree(pi_state);
916 } else {
917 /*
918 * pi_state->list is already empty.
919 * clear pi_state->owner.
920 * refcount is at 0 - put it back to 1.
921 */
922 pi_state->owner = NULL;
923 refcount_set(&pi_state->refcount, 1);
924 current->pi_state_cache = pi_state;
925 }
926 }
927
928 #ifdef CONFIG_FUTEX_PI
929
930 /*
931 * This task is holding PI mutexes at exit time => bad.
932 * Kernel cleans up PI-state, but userspace is likely hosed.
933 * (Robust-futex cleanup is separate and might save the day for userspace.)
934 */
935 static void exit_pi_state_list(struct task_struct *curr)
936 {
937 struct list_head *next, *head = &curr->pi_state_list;
938 struct futex_pi_state *pi_state;
939 struct futex_hash_bucket *hb;
940 union futex_key key = FUTEX_KEY_INIT;
941
942 if (!futex_cmpxchg_enabled)
943 return;
944 /*
945 * We are a ZOMBIE and nobody can enqueue itself on
946 * pi_state_list anymore, but we have to be careful
947 * versus waiters unqueueing themselves:
948 */
949 raw_spin_lock_irq(&curr->pi_lock);
950 while (!list_empty(head)) {
951 next = head->next;
952 pi_state = list_entry(next, struct futex_pi_state, list);
953 key = pi_state->key;
954 hb = hash_futex(&key);
955
956 /*
957 * We can race against put_pi_state() removing itself from the
958 * list (a waiter going away). put_pi_state() will first
959 * decrement the reference count and then modify the list, so
960 * it's possible to see the list entry but fail this reference
961 * acquire.
962 *
963 * In that case; drop the locks to let put_pi_state() make
964 * progress and retry the loop.
965 */
966 if (!refcount_inc_not_zero(&pi_state->refcount)) {
967 raw_spin_unlock_irq(&curr->pi_lock);
968 cpu_relax();
969 raw_spin_lock_irq(&curr->pi_lock);
970 continue;
971 }
972 raw_spin_unlock_irq(&curr->pi_lock);
973
974 spin_lock(&hb->lock);
975 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
976 raw_spin_lock(&curr->pi_lock);
977 /*
978 * We dropped the pi-lock, so re-check whether this
979 * task still owns the PI-state:
980 */
981 if (head->next != next) {
982 /* retain curr->pi_lock for the loop invariant */
983 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
984 spin_unlock(&hb->lock);
985 put_pi_state(pi_state);
986 continue;
987 }
988
989 WARN_ON(pi_state->owner != curr);
990 WARN_ON(list_empty(&pi_state->list));
991 list_del_init(&pi_state->list);
992 pi_state->owner = NULL;
993
994 raw_spin_unlock(&curr->pi_lock);
995 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
996 spin_unlock(&hb->lock);
997
998 rt_mutex_futex_unlock(&pi_state->pi_mutex);
999 put_pi_state(pi_state);
1000
1001 raw_spin_lock_irq(&curr->pi_lock);
1002 }
1003 raw_spin_unlock_irq(&curr->pi_lock);
1004 }
1005 #else
1006 static inline void exit_pi_state_list(struct task_struct *curr) { }
1007 #endif
1008
1009 /*
1010 * We need to check the following states:
1011 *
1012 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
1013 *
1014 * [1] NULL | --- | --- | 0 | 0/1 | Valid
1015 * [2] NULL | --- | --- | >0 | 0/1 | Valid
1016 *
1017 * [3] Found | NULL | -- | Any | 0/1 | Invalid
1018 *
1019 * [4] Found | Found | NULL | 0 | 1 | Valid
1020 * [5] Found | Found | NULL | >0 | 1 | Invalid
1021 *
1022 * [6] Found | Found | task | 0 | 1 | Valid
1023 *
1024 * [7] Found | Found | NULL | Any | 0 | Invalid
1025 *
1026 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
1027 * [9] Found | Found | task | 0 | 0 | Invalid
1028 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
1029 *
1030 * [1] Indicates that the kernel can acquire the futex atomically. We
1031 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
1032 *
1033 * [2] Valid, if TID does not belong to a kernel thread. If no matching
1034 * thread is found then it indicates that the owner TID has died.
1035 *
1036 * [3] Invalid. The waiter is queued on a non-PI futex.
1037 *
1038 * [4] Valid state after exit_robust_list(), which sets the user space
1039 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
1040 *
1041 * [5] The user space value got manipulated between exit_robust_list()
1042 * and exit_pi_state_list()
1043 *
1044 * [6] Valid state after exit_pi_state_list() which sets the new owner in
1045 * the pi_state but cannot access the user space value.
1046 *
1047 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
1048 *
1049 * [8] Owner and user space value match
1050 *
1051 * [9] There is no transient state which sets the user space TID to 0
1052 * except exit_robust_list(), but this is indicated by the
1053 * FUTEX_OWNER_DIED bit. See [4]
1054 *
1055 * [10] There is no transient state which leaves owner and user space
1056 * TID out of sync. Except one error case where the kernel is denied
1057 * write access to the user address, see fixup_pi_state_owner().
1058 *
1059 *
1060 * Serialization and lifetime rules:
1061 *
1062 * hb->lock:
1063 *
1064 * hb -> futex_q, relation
1065 * futex_q -> pi_state, relation
1066 *
1067 * (cannot be raw because hb can contain an arbitrary number
1068 * of futex_q's)
1069 *
1070 * pi_mutex->wait_lock:
1071 *
1072 * {uval, pi_state}
1073 *
1074 * (and pi_mutex 'obviously')
1075 *
1076 * p->pi_lock:
1077 *
1078 * p->pi_state_list -> pi_state->list, relation
1079 *
1080 * pi_state->refcount:
1081 *
1082 * pi_state lifetime
1083 *
1084 *
1085 * Lock order:
1086 *
1087 * hb->lock
1088 * pi_mutex->wait_lock
1089 * p->pi_lock
1090 *
1091 */
1092
1093 /*
1094 * Validate that the existing waiter has a pi_state and sanity check
1095 * the pi_state against the user space value. If correct, attach to
1096 * it.
1097 */
1098 static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
1099 struct futex_pi_state *pi_state,
1100 struct futex_pi_state **ps)
1101 {
1102 pid_t pid = uval & FUTEX_TID_MASK;
1103 u32 uval2;
1104 int ret;
1105
1106 /*
1107 * Userspace might have messed up non-PI and PI futexes [3]
1108 */
1109 if (unlikely(!pi_state))
1110 return -EINVAL;
1111
1112 /*
1113 * We get here with hb->lock held, and having found a
1114 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
1115 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
1116 * which in turn means that futex_lock_pi() still has a reference on
1117 * our pi_state.
1118 *
1119 * The waiter holding a reference on @pi_state also protects against
1120 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
1121 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
1122 * free pi_state before we can take a reference ourselves.
1123 */
1124 WARN_ON(!refcount_read(&pi_state->refcount));
1125
1126 /*
1127 * Now that we have a pi_state, we can acquire wait_lock
1128 * and do the state validation.
1129 */
1130 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1131
1132 /*
1133 * Since {uval, pi_state} is serialized by wait_lock, and our current
1134 * uval was read without holding it, it can have changed. Verify it
1135 * still is what we expect it to be, otherwise retry the entire
1136 * operation.
1137 */
1138 if (get_futex_value_locked(&uval2, uaddr))
1139 goto out_efault;
1140
1141 if (uval != uval2)
1142 goto out_eagain;
1143
1144 /*
1145 * Handle the owner died case:
1146 */
1147 if (uval & FUTEX_OWNER_DIED) {
1148 /*
1149 * exit_pi_state_list sets owner to NULL and wakes the
1150 * topmost waiter. The task which acquires the
1151 * pi_state->rt_mutex will fixup owner.
1152 */
1153 if (!pi_state->owner) {
1154 /*
1155 * No pi state owner, but the user space TID
1156 * is not 0. Inconsistent state. [5]
1157 */
1158 if (pid)
1159 goto out_einval;
1160 /*
1161 * Take a ref on the state and return success. [4]
1162 */
1163 goto out_attach;
1164 }
1165
1166 /*
1167 * If TID is 0, then either the dying owner has not
1168 * yet executed exit_pi_state_list() or some waiter
1169 * acquired the rtmutex in the pi state, but did not
1170 * yet fixup the TID in user space.
1171 *
1172 * Take a ref on the state and return success. [6]
1173 */
1174 if (!pid)
1175 goto out_attach;
1176 } else {
1177 /*
1178 * If the owner died bit is not set, then the pi_state
1179 * must have an owner. [7]
1180 */
1181 if (!pi_state->owner)
1182 goto out_einval;
1183 }
1184
1185 /*
1186 * Bail out if user space manipulated the futex value. If pi
1187 * state exists then the owner TID must be the same as the
1188 * user space TID. [9/10]
1189 */
1190 if (pid != task_pid_vnr(pi_state->owner))
1191 goto out_einval;
1192
1193 out_attach:
1194 get_pi_state(pi_state);
1195 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1196 *ps = pi_state;
1197 return 0;
1198
1199 out_einval:
1200 ret = -EINVAL;
1201 goto out_error;
1202
1203 out_eagain:
1204 ret = -EAGAIN;
1205 goto out_error;
1206
1207 out_efault:
1208 ret = -EFAULT;
1209 goto out_error;
1210
1211 out_error:
1212 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1213 return ret;
1214 }
1215
1216 /**
1217 * wait_for_owner_exiting - Block until the owner has exited
1218 * @ret: return value of the preceding attach attempt (-EBUSY means the owner is exiting)
 * @exiting: Pointer to the exiting task
1219 *
1220 * Caller must hold a refcount on @exiting.
1221 */
1222 static void wait_for_owner_exiting(int ret, struct task_struct *exiting)
1223 {
1224 if (ret != -EBUSY) {
1225 WARN_ON_ONCE(exiting);
1226 return;
1227 }
1228
1229 if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
1230 return;
1231
1232 mutex_lock(&exiting->futex_exit_mutex);
1233 /*
1234 * No point in doing state checking here. If the waiter got here
1235 * while the task was in exec()->exec_futex_release() then it can
1236 * have any FUTEX_STATE_* value when the waiter has acquired the
1237 * mutex. OK, if running, EXITING or DEAD if it reached exit()
1238 * already. Highly unlikely and not a problem. Just one more round
1239 * through the futex maze.
1240 */
1241 mutex_unlock(&exiting->futex_exit_mutex);
1242
1243 put_task_struct(exiting);
1244 }
1245
1246 static int handle_exit_race(u32 __user *uaddr, u32 uval,
1247 struct task_struct *tsk)
1248 {
1249 u32 uval2;
1250
1251 /*
1252 * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
1253 * caller that the alleged owner is busy.
1254 */
1255 if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
1256 return -EBUSY;
1257
1258 /*
1259 * Reread the user space value to handle the following situation:
1260 *
1261 * CPU0 CPU1
1262 *
1263 * sys_exit() sys_futex()
1264 * do_exit() futex_lock_pi()
1265 * futex_lock_pi_atomic()
1266 * exit_signals(tsk) No waiters:
1267 * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
1268 * mm_release(tsk) Set waiter bit
1269 * exit_robust_list(tsk) { *uaddr = 0x80000PID;
1270 * Set owner died attach_to_pi_owner() {
1271 * *uaddr = 0xC0000000; tsk = get_task(PID);
1272 * } if (!tsk->flags & PF_EXITING) {
1273 * ... attach();
1274 * tsk->futex_state = } else {
1275 * FUTEX_STATE_DEAD; if (tsk->futex_state !=
1276 * FUTEX_STATE_DEAD)
1277 * return -EAGAIN;
1278 * return -ESRCH; <--- FAIL
1279 * }
1280 *
1281 * Returning ESRCH unconditionally is wrong here because the
1282 * user space value has been changed by the exiting task.
1283 *
1284 * The same logic applies to the case where the exiting task is
1285 * already gone.
1286 */
1287 if (get_futex_value_locked(&uval2, uaddr))
1288 return -EFAULT;
1289
1290 /* If the user space value has changed, try again. */
1291 if (uval2 != uval)
1292 return -EAGAIN;
1293
1294 /*
1295 * The exiting task did not have a robust list, the robust list was
1296 * corrupted or the user space value in *uaddr is simply bogus.
1297 * Give up and tell user space.
1298 */
1299 return -ESRCH;
1300 }
1301
1302 /*
1303 * Lookup the task for the TID provided from user space and attach to
1304 * it after doing proper sanity checks.
1305 */
1306 static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
1307 struct futex_pi_state **ps,
1308 struct task_struct **exiting)
1309 {
1310 pid_t pid = uval & FUTEX_TID_MASK;
1311 struct futex_pi_state *pi_state;
1312 struct task_struct *p;
1313
1314 /*
1315 * We are the first waiter - try to look up the real owner and attach
1316 * the new pi_state to it, but bail out when TID = 0 [1]
1317 *
1318 * The !pid check is paranoid. None of the call sites should end up
1319 * with pid == 0, but better safe than sorry. Let the caller retry
1320 */
1321 if (!pid)
1322 return -EAGAIN;
1323 p = find_get_task_by_vpid(pid);
1324 if (!p)
1325 return handle_exit_race(uaddr, uval, NULL);
1326
1327 if (unlikely(p->flags & PF_KTHREAD)) {
1328 put_task_struct(p);
1329 return -EPERM;
1330 }
1331
1332 /*
1333 * We need to look at the task state to figure out whether the
1334 * task is exiting. To protect against the change of the task state
1335 * in futex_exit_release(), we do this protected by p->pi_lock:
1336 */
1337 raw_spin_lock_irq(&p->pi_lock);
1338 if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
1339 /*
1340 * The task is on the way out. When the futex state is
1341 * FUTEX_STATE_DEAD, we know that the task has finished
1342 * the cleanup:
1343 */
1344 int ret = handle_exit_race(uaddr, uval, p);
1345
1346 raw_spin_unlock_irq(&p->pi_lock);
1347 /*
1348 * If the owner task is between FUTEX_STATE_EXITING and
1349 * FUTEX_STATE_DEAD then store the task pointer and keep
1350 * the reference on the task struct. The calling code will
1351 * drop all locks, wait for the task to reach
1352 * FUTEX_STATE_DEAD and then drop the refcount. This is
1353 * required to prevent a live lock when the current task
1354 * preempted the exiting task between the two states.
1355 */
1356 if (ret == -EBUSY)
1357 *exiting = p;
1358 else
1359 put_task_struct(p);
1360 return ret;
1361 }
1362
1363 /*
1364 * No existing pi state. First waiter. [2]
1365 *
1366 * This creates pi_state, we have hb->lock held, this means nothing can
1367 * observe this state, wait_lock is irrelevant.
1368 */
1369 pi_state = alloc_pi_state();
1370
1371 /*
1372 * Initialize the pi_mutex in locked state and make @p
1373 * the owner of it:
1374 */
1375 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1376
1377 /* Store the key for possible exit cleanups: */
1378 pi_state->key = *key;
1379
1380 WARN_ON(!list_empty(&pi_state->list));
1381 list_add(&pi_state->list, &p->pi_state_list);
1382 /*
1383 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
1384 * because there is no concurrency as the object is not published yet.
1385 */
1386 pi_state->owner = p;
1387 raw_spin_unlock_irq(&p->pi_lock);
1388
1389 put_task_struct(p);
1390
1391 *ps = pi_state;
1392
1393 return 0;
1394 }
1395
1396 static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1397 struct futex_hash_bucket *hb,
1398 union futex_key *key, struct futex_pi_state **ps,
1399 struct task_struct **exiting)
1400 {
1401 struct futex_q *top_waiter = futex_top_waiter(hb, key);
1402
1403 /*
1404 * If there is a waiter on that futex, validate it and
1405 * attach to the pi_state when the validation succeeds.
1406 */
1407 if (top_waiter)
1408 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1409
1410 /*
1411 * We are the first waiter - try to look up the owner based on
1412 * @uval and attach to it.
1413 */
1414 return attach_to_pi_owner(uaddr, uval, key, ps, exiting);
1415 }
1416
1417 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1418 {
1419 int err;
1420 u32 curval;
1421
1422 if (unlikely(should_fail_futex(true)))
1423 return -EFAULT;
1424
1425 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1426 if (unlikely(err))
1427 return err;
1428
1429 /* If user space value changed, let the caller retry */
1430 return curval != uval ? -EAGAIN : 0;
1431 }
1432
1433 /**
1434 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
1435 * @uaddr: the pi futex user address
1436 * @hb: the pi futex hash bucket
1437 * @key: the futex key associated with uaddr and hb
1438 * @ps: the pi_state pointer where we store the result of the
1439 * lookup
1440 * @task: the task to perform the atomic lock work for. This will
1441 * be "current" except in the case of requeue pi.
1442 * @exiting: Pointer to store the task pointer of the owner task
1443 * which is in the middle of exiting
1444 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1445 *
1446 * Return:
1447 * - 0 - ready to wait;
1448 * - 1 - acquired the lock;
1449 * - <0 - error
1450 *
1451 * The hb->lock and futex_key refs shall be held by the caller.
1452 *
1453 * @exiting is only set when the return value is -EBUSY. If so, this holds
1454 * a refcount on the exiting task on return and the caller needs to drop it
1455 * after waiting for the exit to complete.
1456 */
1457 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1458 union futex_key *key,
1459 struct futex_pi_state **ps,
1460 struct task_struct *task,
1461 struct task_struct **exiting,
1462 int set_waiters)
1463 {
1464 u32 uval, newval, vpid = task_pid_vnr(task);
1465 struct futex_q *top_waiter;
1466 int ret;
1467
1468 /*
1469 * Read the user space value first so we can validate a few
1470 * things before proceeding further.
1471 */
1472 if (get_futex_value_locked(&uval, uaddr))
1473 return -EFAULT;
1474
1475 if (unlikely(should_fail_futex(true)))
1476 return -EFAULT;
1477
1478 /*
1479 * Detect deadlocks.
1480 */
1481 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
1482 return -EDEADLK;
1483
1484 if ((unlikely(should_fail_futex(true))))
1485 return -EDEADLK;
1486
1487 /*
1488 * Lookup existing state first. If it exists, try to attach to
1489 * its pi_state.
1490 */
1491 top_waiter = futex_top_waiter(hb, key);
1492 if (top_waiter)
1493 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
1494
1495 /*
1496 * No waiter and user TID is 0. We are here because the
1497 * waiters or the owner died bit is set or called from
1498 * requeue_cmp_pi or for whatever reason something took the
1499 * syscall.
1500 */
1501 if (!(uval & FUTEX_TID_MASK)) {
1502 /*
1503 * We take over the futex. No other waiters and the user space
1504 * TID is 0. We preserve the owner died bit.
1505 */
1506 newval = uval & FUTEX_OWNER_DIED;
1507 newval |= vpid;
1508
1509 /* The futex requeue_pi code can enforce the waiters bit */
1510 if (set_waiters)
1511 newval |= FUTEX_WAITERS;
1512
1513 ret = lock_pi_update_atomic(uaddr, uval, newval);
1514 /* If the take over worked, return 1 */
1515 return ret < 0 ? ret : 1;
1516 }
1517
1518 /*
1519 * First waiter. Set the waiters bit before attaching ourself to
1520 * the owner. If owner tries to unlock, it will be forced into
1521 * the kernel and blocked on hb->lock.
1522 */
1523 newval = uval | FUTEX_WAITERS;
1524 ret = lock_pi_update_atomic(uaddr, uval, newval);
1525 if (ret)
1526 return ret;
1527 /*
1528 * If the update of the user space value succeeded, we try to
1529 * attach to the owner. If that fails, no harm done, we only
1530 * set the FUTEX_WAITERS bit in the user space variable.
1531 */
1532 return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
1533 }
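/*
 * For context, a hedged sketch of the user-space fast path for which
 * futex_lock_pi_atomic() is the slow path. Names below are illustrative;
 * the word layout uses the real FUTEX_TID_MASK/FUTEX_WAITERS/
 * FUTEX_OWNER_DIED bits from the uapi futex header:
 *
 *	u32 expected = 0;
 *
 *	if (__atomic_compare_exchange_n(&futex_word, &expected, gettid(),
 *					false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *		return;		// uncontended: lock acquired purely in user space
 *	syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 * Only when that cmpxchg fails does the kernel see the futex; the code above
 * then sets FUTEX_WAITERS so that the eventual unlock is forced into the
 * kernel as well (see wake_futex_pi()).
 */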
1534
1535 /**
1536 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1537 * @q: The futex_q to unqueue
1538 *
1539 * The q->lock_ptr must not be NULL and must be held by the caller.
1540 */
1541 static void __unqueue_futex(struct futex_q *q)
1542 {
1543 struct futex_hash_bucket *hb;
1544
1545 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
1546 return;
1547 lockdep_assert_held(q->lock_ptr);
1548
1549 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1550 plist_del(&q->list, &hb->chain);
1551 hb_waiters_dec(hb);
1552 }
1553
1554 /*
1555 * The hash bucket lock must be held when this is called.
1556 * Afterwards, the futex_q must not be accessed. Callers
1557 * must ensure to later call wake_up_q() for the actual
1558 * wakeups to occur.
1559 */
1560 static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
1561 {
1562 struct task_struct *p = q->task;
1563
1564 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1565 return;
1566
1567 get_task_struct(p);
1568 __unqueue_futex(q);
1569 /*
1570 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
1571 * is written, without taking any locks. This is possible in the event
1572 * of a spurious wakeup, for example. A memory barrier is required here
1573 * to prevent the following store to lock_ptr from getting ahead of the
1574 * plist_del in __unqueue_futex().
1575 */
1576 smp_store_release(&q->lock_ptr, NULL);
1577
1578 /*
1579 * Queue the task for later wakeup for after we've released
1580 * the hb->lock. wake_q_add() grabs reference to p.
1581 */
1582 wake_q_add_safe(wake_q, p);
1583 }
1584
1585 /*
1586 * Caller must hold a reference on @pi_state.
1587 */
1588 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
1589 {
1590 u32 curval, newval;
1591 struct task_struct *new_owner;
1592 bool postunlock = false;
1593 DEFINE_WAKE_Q(wake_q);
1594 int ret = 0;
1595
1596 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1597 if (WARN_ON_ONCE(!new_owner)) {
1598 /*
1599 * As per the comment in futex_unlock_pi() this should not happen.
1600 *
1601 * When this happens, give up our locks and try again, giving
1602 * the futex_lock_pi() instance time to complete, either by
1603 * waiting on the rtmutex or removing itself from the futex
1604 * queue.
1605 */
1606 ret = -EAGAIN;
1607 goto out_unlock;
1608 }
1609
1610 /*
1611 * We pass it to the next owner. The WAITERS bit is always kept
1612 * enabled while there is PI state around. We cleanup the owner
1613 * died bit, because we are the owner.
1614 */
1615 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
1616
1617 if (unlikely(should_fail_futex(true))) {
1618 ret = -EFAULT;
1619 goto out_unlock;
1620 }
1621
1622 ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
1623 if (!ret && (curval != uval)) {
1624 /*
1625 * If an unconditional UNLOCK_PI operation (user space did not
1626 * try the TID->0 transition) raced with a waiter setting the
1627 * FUTEX_WAITERS flag between get_user() and locking the hash
1628 * bucket lock, retry the operation.
1629 */
1630 if ((FUTEX_TID_MASK & curval) == uval)
1631 ret = -EAGAIN;
1632 else
1633 ret = -EINVAL;
1634 }
1635
1636 if (!ret) {
1637 /*
1638 * This is a point of no return; once we modified the uval
1639 * there is no going back and subsequent operations must
1640 * not fail.
1641 */
1642 pi_state_update_owner(pi_state, new_owner);
1643 postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1644 }
1645
1646 out_unlock:
1647 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1648
1649 if (postunlock)
1650 rt_mutex_postunlock(&wake_q);
1651
1652 return ret;
1653 }
1654
1655 /*
1656 * Express the locking dependencies for lockdep:
1657 */
1658 static inline void
1659 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1660 {
1661 if (hb1 <= hb2) {
1662 spin_lock(&hb1->lock);
1663 if (hb1 < hb2)
1664 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1665 } else { /* hb1 > hb2 */
1666 spin_lock(&hb2->lock);
1667 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1668 }
1669 }
1670
1671 static inline void
1672 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1673 {
1674 spin_unlock(&hb1->lock);
1675 if (hb1 != hb2)
1676 spin_unlock(&hb2->lock);
1677 }
1678
1679 /*
1680 * Wake up waiters matching bitset queued on this futex (uaddr).
1681 */
1682 static int
1683 futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
1684 {
1685 struct futex_hash_bucket *hb;
1686 struct futex_q *this, *next;
1687 union futex_key key = FUTEX_KEY_INIT;
1688 int ret;
1689 DEFINE_WAKE_Q(wake_q);
1690
1691 if (!bitset)
1692 return -EINVAL;
1693
1694 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
1695 if (unlikely(ret != 0))
1696 goto out;
1697
1698 hb = hash_futex(&key);
1699
1700 /* Make sure we really have tasks to wakeup */
1701 if (!hb_waiters_pending(hb))
1702 goto out_put_key;
1703
1704 spin_lock(&hb->lock);
1705
1706 plist_for_each_entry_safe(this, next, &hb->chain, list) {
1707 if (match_futex (&this->key, &key)) {
1708 if (this->pi_state || this->rt_waiter) {
1709 ret = -EINVAL;
1710 break;
1711 }
1712
1713 /* Check if one of the bits is set in both bitsets */
1714 if (!(this->bitset & bitset))
1715 continue;
1716
1717 mark_wake_futex(&wake_q, this);
1718 if (++ret >= nr_wake)
1719 break;
1720 }
1721 }
1722
1723 spin_unlock(&hb->lock);
1724 wake_up_q(&wake_q);
1725 out_put_key:
1726 put_futex_key(&key);
1727 out:
1728 return ret;
1729 }
1730
1731 static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1732 {
1733 unsigned int op = (encoded_op & 0x70000000) >> 28;
1734 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
1735 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1736 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1737 int oldval, ret;
1738
1739 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
1740 if (oparg < 0 || oparg > 31) {
1741 char comm[sizeof(current->comm)];
1742 /*
1743 * kill this print and return -EINVAL when userspace
1744 * is sane again
1745 */
1746 pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
1747 get_task_comm(comm, current), oparg);
1748 oparg &= 31;
1749 }
1750 oparg = 1 << oparg;
1751 }
1752
1753 if (!access_ok(uaddr, sizeof(u32)))
1754 return -EFAULT;
1755
1756 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
1757 if (ret)
1758 return ret;
1759
1760 switch (cmp) {
1761 case FUTEX_OP_CMP_EQ:
1762 return oldval == cmparg;
1763 case FUTEX_OP_CMP_NE:
1764 return oldval != cmparg;
1765 case FUTEX_OP_CMP_LT:
1766 return oldval < cmparg;
1767 case FUTEX_OP_CMP_GE:
1768 return oldval >= cmparg;
1769 case FUTEX_OP_CMP_LE:
1770 return oldval <= cmparg;
1771 case FUTEX_OP_CMP_GT:
1772 return oldval > cmparg;
1773 default:
1774 return -ENOSYS;
1775 }
1776 }
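/*
 * Worked example of the decoding above, assuming the FUTEX_OP() encoding
 * from include/uapi/linux/futex.h:
 *
 *	encoded_op = FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0)
 *		   = (0 << 28) | (0 << 24) | (1 << 12) | 0 = 0x00001000
 *
 * decodes to op = FUTEX_OP_SET, oparg = 1, cmp = FUTEX_OP_CMP_EQ and
 * cmparg = 0, i.e. "*uaddr2 = 1" and report true (so futex_wake_op() also
 * wakes the second set of waiters) if the old value was 0. When the
 * FUTEX_OP_OPARG_SHIFT flag is set, oparg is instead treated as a shift
 * count (1 << oparg), which is what the 0..31 clamp above is guarding.
 */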
1777
1778 /*
1779 * Wake up all waiters hashed on the physical page that is mapped
1780 * to this virtual address:
1781 */
1782 static int
1783 futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
1784 int nr_wake, int nr_wake2, int op)
1785 {
1786 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1787 struct futex_hash_bucket *hb1, *hb2;
1788 struct futex_q *this, *next;
1789 int ret, op_ret;
1790 DEFINE_WAKE_Q(wake_q);
1791
1792 retry:
1793 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
1794 if (unlikely(ret != 0))
1795 goto out;
1796 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
1797 if (unlikely(ret != 0))
1798 goto out_put_key1;
1799
1800 hb1 = hash_futex(&key1);
1801 hb2 = hash_futex(&key2);
1802
1803 retry_private:
1804 double_lock_hb(hb1, hb2);
1805 op_ret = futex_atomic_op_inuser(op, uaddr2);
1806 if (unlikely(op_ret < 0)) {
1807 double_unlock_hb(hb1, hb2);
1808
1809 if (!IS_ENABLED(CONFIG_MMU) ||
1810 unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
1811 /*
1812 * we don't get EFAULT from MMU faults if we don't have
1813 * an MMU, but we might get them from range checking
1814 */
1815 ret = op_ret;
1816 goto out_put_keys;
1817 }
1818
1819 if (op_ret == -EFAULT) {
1820 ret = fault_in_user_writeable(uaddr2);
1821 if (ret)
1822 goto out_put_keys;
1823 }
1824
1825 if (!(flags & FLAGS_SHARED)) {
1826 cond_resched();
1827 goto retry_private;
1828 }
1829
1830 put_futex_key(&key2);
1831 put_futex_key(&key1);
1832 cond_resched();
1833 goto retry;
1834 }
1835
1836 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
1837 		if (match_futex(&this->key, &key1)) {
1838 if (this->pi_state || this->rt_waiter) {
1839 ret = -EINVAL;
1840 goto out_unlock;
1841 }
1842 mark_wake_futex(&wake_q, this);
1843 if (++ret >= nr_wake)
1844 break;
1845 }
1846 }
1847
1848 if (op_ret > 0) {
1849 op_ret = 0;
1850 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
1851 			if (match_futex(&this->key, &key2)) {
1852 if (this->pi_state || this->rt_waiter) {
1853 ret = -EINVAL;
1854 goto out_unlock;
1855 }
1856 mark_wake_futex(&wake_q, this);
1857 if (++op_ret >= nr_wake2)
1858 break;
1859 }
1860 }
1861 ret += op_ret;
1862 }
1863
1864 out_unlock:
1865 double_unlock_hb(hb1, hb2);
1866 wake_up_q(&wake_q);
1867 out_put_keys:
1868 put_futex_key(&key2);
1869 out_put_key1:
1870 put_futex_key(&key1);
1871 out:
1872 return ret;
1873 }
1874
1875 /**
1876 * requeue_futex() - Requeue a futex_q from one hb to another
1877 * @q: the futex_q to requeue
1878 * @hb1: the source hash_bucket
1879 * @hb2: the target hash_bucket
1880 * @key2: the new key for the requeued futex_q
1881 */
1882 static inline
1883 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1884 struct futex_hash_bucket *hb2, union futex_key *key2)
1885 {
1886
1887 /*
1888 * If key1 and key2 hash to the same bucket, no need to
1889 * requeue.
1890 */
1891 if (likely(&hb1->chain != &hb2->chain)) {
1892 plist_del(&q->list, &hb1->chain);
1893 hb_waiters_dec(hb1);
1894 hb_waiters_inc(hb2);
1895 plist_add(&q->list, &hb2->chain);
1896 q->lock_ptr = &hb2->lock;
1897 }
1898 get_futex_key_refs(key2);
1899 q->key = *key2;
1900 }
1901
1902 /**
1903 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1904 * @q: the futex_q
1905 * @key: the key of the requeue target futex
1906 * @hb: the hash_bucket of the requeue target futex
1907 *
1908 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1909 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1910 * to the requeue target futex so the waiter can detect the wakeup on the right
1911 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1912 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1913 * to protect access to the pi_state to fixup the owner later. Must be called
1914 * with both q->lock_ptr and hb->lock held.
1915 */
1916 static inline
1917 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1918 struct futex_hash_bucket *hb)
1919 {
1920 get_futex_key_refs(key);
1921 q->key = *key;
1922
1923 __unqueue_futex(q);
1924
1925 WARN_ON(!q->rt_waiter);
1926 q->rt_waiter = NULL;
1927
1928 q->lock_ptr = &hb->lock;
1929
1930 wake_up_state(q->task, TASK_NORMAL);
1931 }
1932
1933 /**
1934 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1935 * @pifutex: the user address of the to futex
1936 * @hb1: the from futex hash bucket, must be locked by the caller
1937 * @hb2: the to futex hash bucket, must be locked by the caller
1938 * @key1: the from futex key
1939 * @key2: the to futex key
1940 * @ps: address to store the pi_state pointer
1941 * @exiting: Pointer to store the task pointer of the owner task
1942 * which is in the middle of exiting
1943 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
1944 *
1945 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1946 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1947 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1948 * hb1 and hb2 must be held by the caller.
1949 *
1950 * @exiting is only set when the return value is -EBUSY. If so, this holds
1951 * a refcount on the exiting task on return and the caller needs to drop it
1952 * after waiting for the exit to complete.
1953 *
1954 * Return:
1955 * - 0 - failed to acquire the lock atomically;
1956 * - >0 - acquired the lock, return value is vpid of the top_waiter
1957 * - <0 - error
1958 */
1959 static int
1960 futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
1961 struct futex_hash_bucket *hb2, union futex_key *key1,
1962 union futex_key *key2, struct futex_pi_state **ps,
1963 struct task_struct **exiting, int set_waiters)
1964 {
1965 struct futex_q *top_waiter = NULL;
1966 u32 curval;
1967 int ret, vpid;
1968
1969 if (get_futex_value_locked(&curval, pifutex))
1970 return -EFAULT;
1971
1972 if (unlikely(should_fail_futex(true)))
1973 return -EFAULT;
1974
1975 /*
1976 * Find the top_waiter and determine if there are additional waiters.
1977 * If the caller intends to requeue more than 1 waiter to pifutex,
1978 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1979 * as we have means to handle the possible fault. If not, don't set
1980 	 * the bit unnecessarily as it will force the subsequent unlock to enter
1981 * the kernel.
1982 */
1983 top_waiter = futex_top_waiter(hb1, key1);
1984
1985 /* There are no waiters, nothing for us to do. */
1986 if (!top_waiter)
1987 return 0;
1988
1989 /* Ensure we requeue to the expected futex. */
1990 if (!match_futex(top_waiter->requeue_pi_key, key2))
1991 return -EINVAL;
1992
1993 /*
1994 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1995 * the contended case or if set_waiters is 1. The pi_state is returned
1996 * in ps in contended cases.
1997 */
1998 vpid = task_pid_vnr(top_waiter->task);
1999 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
2000 exiting, set_waiters);
2001 if (ret == 1) {
2002 requeue_pi_wake_futex(top_waiter, key2, hb2);
2003 return vpid;
2004 }
2005 return ret;
2006 }
2007
2008 /**
2009 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
2010 * @uaddr1: source futex user address
2011 * @flags: futex flags (FLAGS_SHARED, etc.)
2012 * @uaddr2: target futex user address
2013 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
2014 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
2015 * @cmpval: @uaddr1 expected value (or %NULL)
2016 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
2017 * pi futex (pi to pi requeue is not supported)
2018 *
2019 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
2020 * uaddr2 atomically on behalf of the top waiter.
2021 *
2022 * Return:
2023 * - >=0 - on success, the number of tasks requeued or woken;
2024 * - <0 - on error
2025 */
2026 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
2027 u32 __user *uaddr2, int nr_wake, int nr_requeue,
2028 u32 *cmpval, int requeue_pi)
2029 {
2030 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
2031 int drop_count = 0, task_count = 0, ret;
2032 struct futex_pi_state *pi_state = NULL;
2033 struct futex_hash_bucket *hb1, *hb2;
2034 struct futex_q *this, *next;
2035 DEFINE_WAKE_Q(wake_q);
2036
2037 if (nr_wake < 0 || nr_requeue < 0)
2038 return -EINVAL;
2039
2040 /*
2041 * When PI not supported: return -ENOSYS if requeue_pi is true,
2042 * consequently the compiler knows requeue_pi is always false past
2043 * this point which will optimize away all the conditional code
2044 * further down.
2045 */
2046 if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
2047 return -ENOSYS;
2048
2049 if (requeue_pi) {
2050 /*
2051 * Requeue PI only works on two distinct uaddrs. This
2052 * check is only valid for private futexes. See below.
2053 */
2054 if (uaddr1 == uaddr2)
2055 return -EINVAL;
2056
2057 /*
2058 * requeue_pi requires a pi_state, try to allocate it now
2059 * without any locks in case it fails.
2060 */
2061 if (refill_pi_state_cache())
2062 return -ENOMEM;
2063 /*
2064 * requeue_pi must wake as many tasks as it can, up to nr_wake
2065 * + nr_requeue, since it acquires the rt_mutex prior to
2066 * returning to userspace, so as to not leave the rt_mutex with
2067 * waiters and no owner. However, second and third wake-ups
2068 * cannot be predicted as they involve race conditions with the
2069 * first wake and a fault while looking up the pi_state. Both
2070 * pthread_cond_signal() and pthread_cond_broadcast() should
2071 * use nr_wake=1.
2072 */
2073 if (nr_wake != 1)
2074 return -EINVAL;
2075 }
2076
2077 retry:
2078 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
2079 if (unlikely(ret != 0))
2080 goto out;
2081 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
2082 requeue_pi ? FUTEX_WRITE : FUTEX_READ);
2083 if (unlikely(ret != 0))
2084 goto out_put_key1;
2085
2086 /*
2087 * The check above which compares uaddrs is not sufficient for
2088 * shared futexes. We need to compare the keys:
2089 */
2090 if (requeue_pi && match_futex(&key1, &key2)) {
2091 ret = -EINVAL;
2092 goto out_put_keys;
2093 }
2094
2095 hb1 = hash_futex(&key1);
2096 hb2 = hash_futex(&key2);
2097
2098 retry_private:
2099 hb_waiters_inc(hb2);
2100 double_lock_hb(hb1, hb2);
2101
2102 if (likely(cmpval != NULL)) {
2103 u32 curval;
2104
2105 ret = get_futex_value_locked(&curval, uaddr1);
2106
2107 if (unlikely(ret)) {
2108 double_unlock_hb(hb1, hb2);
2109 hb_waiters_dec(hb2);
2110
2111 ret = get_user(curval, uaddr1);
2112 if (ret)
2113 goto out_put_keys;
2114
2115 if (!(flags & FLAGS_SHARED))
2116 goto retry_private;
2117
2118 put_futex_key(&key2);
2119 put_futex_key(&key1);
2120 goto retry;
2121 }
2122 if (curval != *cmpval) {
2123 ret = -EAGAIN;
2124 goto out_unlock;
2125 }
2126 }
2127
2128 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
2129 struct task_struct *exiting = NULL;
2130
2131 /*
2132 * Attempt to acquire uaddr2 and wake the top waiter. If we
2133 * intend to requeue waiters, force setting the FUTEX_WAITERS
2134 * bit. We force this here where we are able to easily handle
2135 * faults rather in the requeue loop below.
2136 */
2137 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
2138 &key2, &pi_state,
2139 &exiting, nr_requeue);
2140
2141 /*
2142 * At this point the top_waiter has either taken uaddr2 or is
2143 * waiting on it. If the former, then the pi_state will not
2144 * exist yet, look it up one more time to ensure we have a
2145 * reference to it. If the lock was taken, ret contains the
2146 * vpid of the top waiter task.
2147 * If the lock was not taken, we have pi_state and an initial
2148 * refcount on it. In case of an error we have nothing.
2149 */
2150 if (ret > 0) {
2151 WARN_ON(pi_state);
2152 drop_count++;
2153 task_count++;
2154 /*
2155 * If we acquired the lock, then the user space value
2156 * of uaddr2 should be vpid. It cannot be changed by
2157 * the top waiter as it is blocked on hb2 lock if it
2158 * tries to do so. If something fiddled with it behind
2159 * our back the pi state lookup might unearth it. So
2160 * we rather use the known value than rereading and
2161 * handing potential crap to lookup_pi_state.
2162 *
2163 * If that call succeeds then we have pi_state and an
2164 * initial refcount on it.
2165 */
2166 ret = lookup_pi_state(uaddr2, ret, hb2, &key2,
2167 &pi_state, &exiting);
2168 }
2169
2170 switch (ret) {
2171 case 0:
2172 /* We hold a reference on the pi state. */
2173 break;
2174
2175 /* If the above failed, then pi_state is NULL */
2176 case -EFAULT:
2177 double_unlock_hb(hb1, hb2);
2178 hb_waiters_dec(hb2);
2179 put_futex_key(&key2);
2180 put_futex_key(&key1);
2181 ret = fault_in_user_writeable(uaddr2);
2182 if (!ret)
2183 goto retry;
2184 goto out;
2185 case -EBUSY:
2186 case -EAGAIN:
2187 /*
2188 * Two reasons for this:
2189 * - EBUSY: Owner is exiting and we just wait for the
2190 * exit to complete.
2191 * - EAGAIN: The user space value changed.
2192 */
2193 double_unlock_hb(hb1, hb2);
2194 hb_waiters_dec(hb2);
2195 put_futex_key(&key2);
2196 put_futex_key(&key1);
2197 /*
2198 * Handle the case where the owner is in the middle of
2199 * exiting. Wait for the exit to complete otherwise
2200 * this task might loop forever, aka. live lock.
2201 */
2202 wait_for_owner_exiting(ret, exiting);
2203 cond_resched();
2204 goto retry;
2205 default:
2206 goto out_unlock;
2207 }
2208 }
2209
2210 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
2211 if (task_count - nr_wake >= nr_requeue)
2212 break;
2213
2214 if (!match_futex(&this->key, &key1))
2215 continue;
2216
2217 /*
2218 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
2219 * be paired with each other and no other futex ops.
2220 *
2221 * We should never be requeueing a futex_q with a pi_state,
2222 * which is awaiting a futex_unlock_pi().
2223 */
2224 if ((requeue_pi && !this->rt_waiter) ||
2225 (!requeue_pi && this->rt_waiter) ||
2226 this->pi_state) {
2227 ret = -EINVAL;
2228 break;
2229 }
2230
2231 /*
2232 * Wake nr_wake waiters. For requeue_pi, if we acquired the
2233 * lock, we already woke the top_waiter. If not, it will be
2234 * woken by futex_unlock_pi().
2235 */
2236 if (++task_count <= nr_wake && !requeue_pi) {
2237 mark_wake_futex(&wake_q, this);
2238 continue;
2239 }
2240
2241 /* Ensure we requeue to the expected futex for requeue_pi. */
2242 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
2243 ret = -EINVAL;
2244 break;
2245 }
2246
2247 /*
2248 * Requeue nr_requeue waiters and possibly one more in the case
2249 * of requeue_pi if we couldn't acquire the lock atomically.
2250 */
2251 if (requeue_pi) {
2252 /*
2253 * Prepare the waiter to take the rt_mutex. Take a
2254 * refcount on the pi_state and store the pointer in
2255 * the futex_q object of the waiter.
2256 */
2257 get_pi_state(pi_state);
2258 this->pi_state = pi_state;
2259 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
2260 this->rt_waiter,
2261 this->task);
2262 if (ret == 1) {
2263 /*
2264 * We got the lock. We do neither drop the
2265 * refcount on pi_state nor clear
2266 * this->pi_state because the waiter needs the
2267 * pi_state for cleaning up the user space
2268 * value. It will drop the refcount after
2269 * doing so.
2270 */
2271 requeue_pi_wake_futex(this, &key2, hb2);
2272 drop_count++;
2273 continue;
2274 } else if (ret) {
2275 /*
2276 * rt_mutex_start_proxy_lock() detected a
2277 * potential deadlock when we tried to queue
2278 * that waiter. Drop the pi_state reference
2279 * which we took above and remove the pointer
2280 * to the state from the waiters futex_q
2281 * object.
2282 */
2283 this->pi_state = NULL;
2284 put_pi_state(pi_state);
2285 /*
2286 * We stop queueing more waiters and let user
2287 * space deal with the mess.
2288 */
2289 break;
2290 }
2291 }
2292 requeue_futex(this, hb1, hb2, &key2);
2293 drop_count++;
2294 }
2295
2296 /*
2297 * We took an extra initial reference to the pi_state either
2298 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2299 * need to drop it here again.
2300 */
2301 put_pi_state(pi_state);
2302
2303 out_unlock:
2304 double_unlock_hb(hb1, hb2);
2305 wake_up_q(&wake_q);
2306 hb_waiters_dec(hb2);
2307
2308 /*
2309 * drop_futex_key_refs() must be called outside the spinlocks. During
2310 * the requeue we moved futex_q's from the hash bucket at key1 to the
2311 * one at key2 and updated their key pointer. We no longer need to
2312 * hold the references to key1.
2313 */
2314 while (--drop_count >= 0)
2315 drop_futex_key_refs(&key1);
2316
2317 out_put_keys:
2318 put_futex_key(&key2);
2319 out_put_key1:
2320 put_futex_key(&key1);
2321 out:
2322 return ret ? ret : task_count;
2323 }
2324
2325 /* The key must be already stored in q->key. */
2326 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
2327 __acquires(&hb->lock)
2328 {
2329 struct futex_hash_bucket *hb;
2330
2331 hb = hash_futex(&q->key);
2332
2333 /*
2334 * Increment the counter before taking the lock so that
2335 * a potential waker won't miss a to-be-slept task that is
2336 * waiting for the spinlock. This is safe as all queue_lock()
2337 * users end up calling queue_me(). Similarly, for housekeeping,
2338 * decrement the counter at queue_unlock() when some error has
2339 * occurred and we don't end up adding the task to the list.
2340 */
2341 hb_waiters_inc(hb); /* implies smp_mb(); (A) */
2342
2343 q->lock_ptr = &hb->lock;
2344
2345 spin_lock(&hb->lock);
2346 return hb;
2347 }
2348
2349 static inline void
2350 queue_unlock(struct futex_hash_bucket *hb)
2351 __releases(&hb->lock)
2352 {
2353 spin_unlock(&hb->lock);
2354 hb_waiters_dec(hb);
2355 }
2356
2357 static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2358 {
2359 int prio;
2360 bool already_on_hb = false;
2361
2362 /*
2363 * The priority used to register this element is
2364 * - either the real thread-priority for the real-time threads
2365 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2366 * - or MAX_RT_PRIO for non-RT threads.
2367 * Thus, all RT-threads are woken first in priority order, and
2368 * the others are woken last, in FIFO order.
2369 */
2370 prio = min(current->normal_prio, MAX_RT_PRIO);
2371
2372 plist_node_init(&q->list, prio);
2373 trace_android_vh_alter_futex_plist_add(&q->list, &hb->chain, &already_on_hb);
2374 if (!already_on_hb)
2375 plist_add(&q->list, &hb->chain);
2376 q->task = current;
2377 }
2378
2379 /**
2380 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2381 * @q: The futex_q to enqueue
2382 * @hb: The destination hash bucket
2383 *
2384 * The hb->lock must be held by the caller, and is released here. A call to
2385 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2386 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2387 * or nothing if the unqueue is done as part of the wake process and the unqueue
2388 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
2389 * an example).
2390 */
2391 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2392 __releases(&hb->lock)
2393 {
2394 __queue_me(q, hb);
2395 spin_unlock(&hb->lock);
2396 }
2397
2398 /**
2399 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2400 * @q: The futex_q to unqueue
2401 *
2402 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2403 * be paired with exactly one earlier call to queue_me().
2404 *
2405 * Return:
2406  * - 1 - if the futex_q was still queued (and we unqueued it);
2407 * - 0 - if the futex_q was already removed by the waking thread
2408 */
2409 static int unqueue_me(struct futex_q *q)
2410 {
2411 spinlock_t *lock_ptr;
2412 int ret = 0;
2413
2414 /* In the common case we don't take the spinlock, which is nice. */
2415 retry:
2416 /*
2417 * q->lock_ptr can change between this read and the following spin_lock.
2418 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2419 * optimizing lock_ptr out of the logic below.
2420 */
2421 lock_ptr = READ_ONCE(q->lock_ptr);
2422 if (lock_ptr != NULL) {
2423 spin_lock(lock_ptr);
2424 /*
2425 * q->lock_ptr can change between reading it and
2426 * spin_lock(), causing us to take the wrong lock. This
2427 * corrects the race condition.
2428 *
2429 * Reasoning goes like this: if we have the wrong lock,
2430 * q->lock_ptr must have changed (maybe several times)
2431 * between reading it and the spin_lock(). It can
2432 * change again after the spin_lock() but only if it was
2433 * already changed before the spin_lock(). It cannot,
2434 * however, change back to the original value. Therefore
2435 * we can detect whether we acquired the correct lock.
2436 */
2437 if (unlikely(lock_ptr != q->lock_ptr)) {
2438 spin_unlock(lock_ptr);
2439 goto retry;
2440 }
2441 __unqueue_futex(q);
2442
2443 BUG_ON(q->pi_state);
2444
2445 spin_unlock(lock_ptr);
2446 ret = 1;
2447 }
2448
2449 drop_futex_key_refs(&q->key);
2450 return ret;
2451 }
2452
2453 /*
2454  * PI futexes cannot be requeued and must remove themselves from the
2455 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2456 * and dropped here.
2457 */
2458 static void unqueue_me_pi(struct futex_q *q)
2459 __releases(q->lock_ptr)
2460 {
2461 __unqueue_futex(q);
2462
2463 BUG_ON(!q->pi_state);
2464 put_pi_state(q->pi_state);
2465 q->pi_state = NULL;
2466
2467 spin_unlock(q->lock_ptr);
2468 }
2469
2470 static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2471 struct task_struct *argowner)
2472 {
2473 u32 uval, uninitialized_var(curval), newval, newtid;
2474 struct futex_pi_state *pi_state = q->pi_state;
2475 struct task_struct *oldowner, *newowner;
2476 int err = 0;
2477
2478 oldowner = pi_state->owner;
2479
2480 /*
2481 * We are here because either:
2482 *
2483 * - we stole the lock and pi_state->owner needs updating to reflect
2484 * that (@argowner == current),
2485 *
2486 * or:
2487 *
2488 * - someone stole our lock and we need to fix things to point to the
2489 * new owner (@argowner == NULL).
2490 *
2491 * Either way, we have to replace the TID in the user space variable.
2492 * This must be atomic as we have to preserve the owner died bit here.
2493 *
2494 * Note: We write the user space value _before_ changing the pi_state
2495 * because we can fault here. Imagine swapped out pages or a fork
2496 * that marked all the anonymous memory readonly for cow.
2497 *
2498 * Modifying pi_state _before_ the user space value would leave the
2499 * pi_state in an inconsistent state when we fault here, because we
2500 * need to drop the locks to handle the fault. This might be observed
2501 * in the PID check in lookup_pi_state.
2502 */
2503 retry:
2504 if (!argowner) {
2505 if (oldowner != current) {
2506 /*
2507 * We raced against a concurrent self; things are
2508 * already fixed up. Nothing to do.
2509 */
2510 return 0;
2511 }
2512
2513 if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2514 /* We got the lock. pi_state is correct. Tell caller. */
2515 return 1;
2516 }
2517
2518 /*
2519 * The trylock just failed, so either there is an owner or
2520 * there is a higher priority waiter than this one.
2521 */
2522 newowner = rt_mutex_owner(&pi_state->pi_mutex);
2523 /*
2524 * If the higher priority waiter has not yet taken over the
2525 * rtmutex then newowner is NULL. We can't return here with
2526 * that state because it's inconsistent vs. the user space
2527 * state. So drop the locks and try again. It's a valid
2528 * situation and not any different from the other retry
2529 * conditions.
2530 */
2531 if (unlikely(!newowner)) {
2532 err = -EAGAIN;
2533 goto handle_err;
2534 }
2535 } else {
2536 WARN_ON_ONCE(argowner != current);
2537 if (oldowner == current) {
2538 /*
2539 * We raced against a concurrent self; things are
2540 * already fixed up. Nothing to do.
2541 */
2542 return 1;
2543 }
2544 newowner = argowner;
2545 }
2546
2547 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
2548 /* Owner died? */
2549 if (!pi_state->owner)
2550 newtid |= FUTEX_OWNER_DIED;
2551
2552 err = get_futex_value_locked(&uval, uaddr);
2553 if (err)
2554 goto handle_err;
2555
2556 for (;;) {
2557 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2558
2559 err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
2560 if (err)
2561 goto handle_err;
2562
2563 if (curval == uval)
2564 break;
2565 uval = curval;
2566 }
2567
2568 /*
2569 * We fixed up user space. Now we need to fix the pi_state
2570 * itself.
2571 */
2572 pi_state_update_owner(pi_state, newowner);
2573
2574 return argowner == current;
2575
2576 /*
2577 * In order to reschedule or handle a page fault, we need to drop the
2578 * locks here. In the case of a fault, this gives the other task
2579 * (either the highest priority waiter itself or the task which stole
2580 * the rtmutex) the chance to try the fixup of the pi_state. So once we
2581 * are back from handling the fault we need to check the pi_state after
2582 * reacquiring the locks and before trying to do another fixup. When
2583 * the fixup has been done already we simply return.
2584 *
2585 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
2586 * drop hb->lock since the caller owns the hb -> futex_q relation.
2587 * Dropping the pi_mutex->wait_lock requires the state revalidate.
2588 */
2589 handle_err:
2590 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2591 spin_unlock(q->lock_ptr);
2592
2593 switch (err) {
2594 case -EFAULT:
2595 err = fault_in_user_writeable(uaddr);
2596 break;
2597
2598 case -EAGAIN:
2599 cond_resched();
2600 err = 0;
2601 break;
2602
2603 default:
2604 WARN_ON_ONCE(1);
2605 break;
2606 }
2607
2608 spin_lock(q->lock_ptr);
2609 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2610
2611 /*
2612 * Check if someone else fixed it for us:
2613 */
2614 if (pi_state->owner != oldowner)
2615 return argowner == current;
2616
2617 /* Retry if err was -EAGAIN or the fault in succeeded */
2618 if (!err)
2619 goto retry;
2620
2621 /*
2622 * fault_in_user_writeable() failed so user state is immutable. At
2623 * best we can make the kernel state consistent but user state will
2624 * be most likely hosed and any subsequent unlock operation will be
2625 * rejected due to PI futex rule [10].
2626 *
2627 * Ensure that the rtmutex owner is also the pi_state owner despite
2628 * the user space value claiming something different. There is no
2629 * point in unlocking the rtmutex if current is the owner as it
2630 * would need to wait until the next waiter has taken the rtmutex
2631 * to guarantee consistent state. Keep it simple. Userspace asked
2632 	 * for this wrecked state.
2633 *
2634 * The rtmutex has an owner - either current or some other
2635 * task. See the EAGAIN loop above.
2636 */
2637 pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
2638
2639 return err;
2640 }
2641
2642 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
2643 struct task_struct *argowner)
2644 {
2645 struct futex_pi_state *pi_state = q->pi_state;
2646 int ret;
2647
2648 lockdep_assert_held(q->lock_ptr);
2649
2650 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2651 ret = __fixup_pi_state_owner(uaddr, q, argowner);
2652 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2653 return ret;
2654 }
2655
2656 static long futex_wait_restart(struct restart_block *restart);
2657
2658 /**
2659 * fixup_owner() - Post lock pi_state and corner case management
2660 * @uaddr: user address of the futex
2661 * @q: futex_q (contains pi_state and access to the rt_mutex)
2662 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2663 *
2664 * After attempting to lock an rt_mutex, this function is called to cleanup
2665 * the pi_state owner as well as handle race conditions that may allow us to
2666 * acquire the lock. Must be called with the hb lock held.
2667 *
2668 * Return:
2669 * - 1 - success, lock taken;
2670 * - 0 - success, lock not taken;
2671 * - <0 - on error (-EFAULT)
2672 */
2673 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
2674 {
2675 if (locked) {
2676 /*
2677 * Got the lock. We might not be the anticipated owner if we
2678 * did a lock-steal - fix up the PI-state in that case:
2679 *
2680 * Speculative pi_state->owner read (we don't hold wait_lock);
2681 * since we own the lock pi_state->owner == current is the
2682 * stable state, anything else needs more attention.
2683 */
2684 if (q->pi_state->owner != current)
2685 return fixup_pi_state_owner(uaddr, q, current);
2686 return 1;
2687 }
2688
2689 /*
2690 * If we didn't get the lock; check if anybody stole it from us. In
2691 * that case, we need to fix up the uval to point to them instead of
2692 * us, otherwise bad things happen. [10]
2693 *
2694 * Another speculative read; pi_state->owner == current is unstable
2695 * but needs our attention.
2696 */
2697 if (q->pi_state->owner == current)
2698 return fixup_pi_state_owner(uaddr, q, NULL);
2699
2700 /*
2701 * Paranoia check. If we did not take the lock, then we should not be
2702 * the owner of the rt_mutex. Warn and establish consistent state.
2703 */
2704 if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
2705 return fixup_pi_state_owner(uaddr, q, current);
2706
2707 return 0;
2708 }
2709
2710 /**
2711 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2712 * @hb: the futex hash bucket, must be locked by the caller
2713 * @q: the futex_q to queue up on
2714 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
2715 */
2716 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
2717 struct hrtimer_sleeper *timeout)
2718 {
2719 /*
2720 * The task state is guaranteed to be set before another task can
2721 * wake it. set_current_state() is implemented using smp_store_mb() and
2722 * queue_me() calls spin_unlock() upon completion, both serializing
2723 * access to the hash list and forcing another memory barrier.
2724 */
2725 set_current_state(TASK_INTERRUPTIBLE);
2726 queue_me(q, hb);
2727
2728 /* Arm the timer */
2729 if (timeout)
2730 hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
2731
2732 /*
2733 * If we have been removed from the hash list, then another task
2734 * has tried to wake us, and we can skip the call to schedule().
2735 */
2736 if (likely(!plist_node_empty(&q->list))) {
2737 /*
2738 * If the timer has already expired, current will already be
2739 * flagged for rescheduling. Only call schedule if there
2740 * is no timeout, or if it has yet to expire.
2741 */
2742 if (!timeout || timeout->task)
2743 freezable_schedule();
2744 }
2745 __set_current_state(TASK_RUNNING);
2746 }
2747
2748 /**
2749 * futex_wait_setup() - Prepare to wait on a futex
2750 * @uaddr: the futex userspace address
2751 * @val: the expected value
2752 * @flags: futex flags (FLAGS_SHARED, etc.)
2753 * @q: the associated futex_q
2754 * @hb: storage for hash_bucket pointer to be returned to caller
2755 *
2756 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2757 * compare it with the expected value. Handle atomic faults internally.
2758 * Return with the hb lock held and a q.key reference on success, and unlocked
2759 * with no q.key reference on failure.
2760 *
2761 * Return:
2762 * - 0 - uaddr contains val and hb has been locked;
2763  * - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
2764 */
2765 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
2766 struct futex_q *q, struct futex_hash_bucket **hb)
2767 {
2768 u32 uval;
2769 int ret;
2770
2771 /*
2772 * Access the page AFTER the hash-bucket is locked.
2773 * Order is important:
2774 *
2775 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2776 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2777 *
2778 * The basic logical guarantee of a futex is that it blocks ONLY
2779 * if cond(var) is known to be true at the time of blocking, for
2780 * any cond. If we locked the hash-bucket after testing *uaddr, that
2781 * would open a race condition where we could block indefinitely with
2782 * cond(var) false, which would violate the guarantee.
2783 *
2784 * On the other hand, we insert q and release the hash-bucket only
2785 * after testing *uaddr. This guarantees that futex_wait() will NOT
2786 * absorb a wakeup if *uaddr does not match the desired values
2787 * while the syscall executes.
2788 */
2789 retry:
2790 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
2791 if (unlikely(ret != 0))
2792 return ret;
2793
2794 retry_private:
2795 *hb = queue_lock(q);
2796
2797 ret = get_futex_value_locked(&uval, uaddr);
2798
2799 if (ret) {
2800 queue_unlock(*hb);
2801
2802 ret = get_user(uval, uaddr);
2803 if (ret)
2804 goto out;
2805
2806 if (!(flags & FLAGS_SHARED))
2807 goto retry_private;
2808
2809 put_futex_key(&q->key);
2810 goto retry;
2811 }
2812
2813 if (uval != val) {
2814 queue_unlock(*hb);
2815 ret = -EWOULDBLOCK;
2816 }
2817
2818 out:
2819 if (ret)
2820 put_futex_key(&q->key);
2821 return ret;
2822 }
2823
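/**
 * futex_wait() - Wait on a futex while its value matches the expected one
 * @uaddr:	the futex userspace address
 * @val:	the expected value of *@uaddr
 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.)
 * @abs_time:	absolute timeout, or NULL for none
 * @bitset:	bitset to match against the bitset supplied by the waker
 *
 * Return:
 *  - 0            - woken up (the futex_q was dequeued by a waker)
 *  - -EWOULDBLOCK - *@uaddr did not contain @val
 *  - -ETIMEDOUT   - the timeout expired
 *  - a restart/-ERESTARTSYS error if interrupted by a signal
 */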
2824 static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2825 ktime_t *abs_time, u32 bitset)
2826 {
2827 struct hrtimer_sleeper timeout, *to;
2828 struct restart_block *restart;
2829 struct futex_hash_bucket *hb;
2830 struct futex_q q = futex_q_init;
2831 int ret;
2832
2833 if (!bitset)
2834 return -EINVAL;
2835 q.bitset = bitset;
2836
2837 to = futex_setup_timer(abs_time, &timeout, flags,
2838 current->timer_slack_ns);
2839 retry:
2840 /*
2841 * Prepare to wait on uaddr. On success, holds hb lock and increments
2842 * q.key refs.
2843 */
2844 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
2845 if (ret)
2846 goto out;
2847
2848 /* queue_me and wait for wakeup, timeout, or a signal. */
2849 futex_wait_queue_me(hb, &q, to);
2850
2851 /* If we were woken (and unqueued), we succeeded, whatever. */
2852 ret = 0;
2853 /* unqueue_me() drops q.key ref */
2854 if (!unqueue_me(&q))
2855 goto out;
2856 ret = -ETIMEDOUT;
2857 if (to && !to->task)
2858 goto out;
2859
2860 /*
2861 * We expect signal_pending(current), but we might be the
2862 * victim of a spurious wakeup as well.
2863 */
2864 if (!signal_pending(current))
2865 goto retry;
2866
2867 ret = -ERESTARTSYS;
2868 if (!abs_time)
2869 goto out;
2870
2871 	restart = &current->restart_block;
2872 restart->futex.uaddr = uaddr;
2873 restart->futex.val = val;
2874 restart->futex.time = *abs_time;
2875 restart->futex.bitset = bitset;
2876 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
2877
2878 ret = set_restart_fn(restart, futex_wait_restart);
2879
2880 out:
2881 if (to) {
2882 hrtimer_cancel(&to->timer);
2883 destroy_hrtimer_on_stack(&to->timer);
2884 }
2885 return ret;
2886 }
2887
2888
2889 static long futex_wait_restart(struct restart_block *restart)
2890 {
2891 u32 __user *uaddr = restart->futex.uaddr;
2892 ktime_t t, *tp = NULL;
2893
2894 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2895 t = restart->futex.time;
2896 tp = &t;
2897 }
2898 restart->fn = do_no_restart_syscall;
2899
2900 return (long)futex_wait(uaddr, restart->futex.flags,
2901 restart->futex.val, tp, restart->futex.bitset);
2902 }
2903
2904
2905 /*
2906 * Userspace tried a 0 -> TID atomic transition of the futex value
2907 * and failed. The kernel side here does the whole locking operation:
2908 * if there are waiters then it will block as a consequence of relying
2909 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2910 * a 0 value of the futex too.).
2911 *
2912  * Also serves as a futex trylock_pi() operation, with the corresponding semantics.
2913 */
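/*
 * For reference, the (hypothetical) userspace fast path that leads here is
 * roughly:
 *
 *	if (cmpxchg(uaddr, 0, gettid()) != 0)
 *		futex(uaddr, FUTEX_LOCK_PI, ...);
 *
 * i.e. the kernel is only entered once the uncontended 0 -> TID transition
 * has failed.
 */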
2914 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
2915 ktime_t *time, int trylock)
2916 {
2917 struct hrtimer_sleeper timeout, *to;
2918 struct task_struct *exiting = NULL;
2919 struct rt_mutex_waiter rt_waiter;
2920 struct futex_hash_bucket *hb;
2921 struct futex_q q = futex_q_init;
2922 int res, ret;
2923
2924 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2925 return -ENOSYS;
2926
2927 if (refill_pi_state_cache())
2928 return -ENOMEM;
2929
2930 to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
2931
2932 retry:
2933 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
2934 if (unlikely(ret != 0))
2935 goto out;
2936
2937 retry_private:
2938 hb = queue_lock(&q);
2939
2940 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
2941 &exiting, 0);
2942 if (unlikely(ret)) {
2943 /*
2944 * Atomic work succeeded and we got the lock,
2945 * or failed. Either way, we do _not_ block.
2946 */
2947 switch (ret) {
2948 case 1:
2949 /* We got the lock. */
2950 ret = 0;
2951 goto out_unlock_put_key;
2952 case -EFAULT:
2953 goto uaddr_faulted;
2954 case -EBUSY:
2955 case -EAGAIN:
2956 /*
2957 * Two reasons for this:
2958 * - EBUSY: Task is exiting and we just wait for the
2959 * exit to complete.
2960 * - EAGAIN: The user space value changed.
2961 */
2962 queue_unlock(hb);
2963 put_futex_key(&q.key);
2964 /*
2965 * Handle the case where the owner is in the middle of
2966 * exiting. Wait for the exit to complete otherwise
2967 * this task might loop forever, aka. live lock.
2968 */
2969 wait_for_owner_exiting(ret, exiting);
2970 cond_resched();
2971 goto retry;
2972 default:
2973 goto out_unlock_put_key;
2974 }
2975 }
2976
2977 WARN_ON(!q.pi_state);
2978
2979 /*
2980 * Only actually queue now that the atomic ops are done:
2981 */
2982 __queue_me(&q, hb);
2983
2984 if (trylock) {
2985 ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
2986 /* Fixup the trylock return value: */
2987 ret = ret ? 0 : -EWOULDBLOCK;
2988 goto no_block;
2989 }
2990
2991 rt_mutex_init_waiter(&rt_waiter);
2992
2993 /*
2994 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
2995 * hold it while doing rt_mutex_start_proxy(), because then it will
2996 	 * include hb->lock in the blocking chain, even though we'll not in
2997 * fact hold it while blocking. This will lead it to report -EDEADLK
2998 * and BUG when futex_unlock_pi() interleaves with this.
2999 *
3000 * Therefore acquire wait_lock while holding hb->lock, but drop the
3001 * latter before calling __rt_mutex_start_proxy_lock(). This
3002 * interleaves with futex_unlock_pi() -- which does a similar lock
3003 * handoff -- such that the latter can observe the futex_q::pi_state
3004 * before __rt_mutex_start_proxy_lock() is done.
3005 */
3006 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
3007 spin_unlock(q.lock_ptr);
3008 /*
3009 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
3010 * such that futex_unlock_pi() is guaranteed to observe the waiter when
3011 * it sees the futex_q::pi_state.
3012 */
3013 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
3014 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
3015
3016 if (ret) {
3017 if (ret == 1)
3018 ret = 0;
3019 goto cleanup;
3020 }
3021
3022 if (unlikely(to))
3023 hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
3024
3025 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
3026
3027 cleanup:
3028 spin_lock(q.lock_ptr);
3029 /*
3030 * If we failed to acquire the lock (deadlock/signal/timeout), we must
3031 * first acquire the hb->lock before removing the lock from the
3032 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
3033 * lists consistent.
3034 *
3035 * In particular; it is important that futex_unlock_pi() can not
3036 * observe this inconsistency.
3037 */
3038 if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
3039 ret = 0;
3040
3041 no_block:
3042 /*
3043 * Fixup the pi_state owner and possibly acquire the lock if we
3044 * haven't already.
3045 */
3046 res = fixup_owner(uaddr, &q, !ret);
3047 /*
3048 	 * If fixup_owner() returned an error, propagate that. If it acquired
3049 * the lock, clear our -ETIMEDOUT or -EINTR.
3050 */
3051 if (res)
3052 ret = (res < 0) ? res : 0;
3053
3054 /* Unqueue and drop the lock */
3055 unqueue_me_pi(&q);
3056
3057 goto out_put_key;
3058
3059 out_unlock_put_key:
3060 queue_unlock(hb);
3061
3062 out_put_key:
3063 put_futex_key(&q.key);
3064 out:
3065 if (to) {
3066 hrtimer_cancel(&to->timer);
3067 destroy_hrtimer_on_stack(&to->timer);
3068 }
3069 return ret != -EINTR ? ret : -ERESTARTNOINTR;
3070
3071 uaddr_faulted:
3072 queue_unlock(hb);
3073
3074 ret = fault_in_user_writeable(uaddr);
3075 if (ret)
3076 goto out_put_key;
3077
3078 if (!(flags & FLAGS_SHARED))
3079 goto retry_private;
3080
3081 put_futex_key(&q.key);
3082 goto retry;
3083 }
3084
3085 /*
3086 * Userspace attempted a TID -> 0 atomic transition, and failed.
3087 * This is the in-kernel slowpath: we look up the PI state (if any),
3088 * and do the rt-mutex unlock.
3089 */
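/*
 * The corresponding (hypothetical) userspace unlock fast path is roughly:
 *
 *	if (cmpxchg(uaddr, gettid(), 0) != gettid())
 *		futex(uaddr, FUTEX_UNLOCK_PI, ...);
 *
 * so this slowpath is normally only reached when FUTEX_WAITERS (or
 * OWNER_DIED) is set in the futex value, or when userspace misbehaved.
 */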
3090 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
3091 {
3092 u32 curval, uval, vpid = task_pid_vnr(current);
3093 union futex_key key = FUTEX_KEY_INIT;
3094 struct futex_hash_bucket *hb;
3095 struct futex_q *top_waiter;
3096 int ret;
3097
3098 if (!IS_ENABLED(CONFIG_FUTEX_PI))
3099 return -ENOSYS;
3100
3101 retry:
3102 if (get_user(uval, uaddr))
3103 return -EFAULT;
3104 /*
3105 * We release only a lock we actually own:
3106 */
3107 if ((uval & FUTEX_TID_MASK) != vpid)
3108 return -EPERM;
3109
3110 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
3111 if (ret)
3112 return ret;
3113
3114 hb = hash_futex(&key);
3115 spin_lock(&hb->lock);
3116
3117 /*
3118 * Check waiters first. We do not trust user space values at
3119 * all and we at least want to know if user space fiddled
3120 * with the futex value instead of blindly unlocking.
3121 */
3122 top_waiter = futex_top_waiter(hb, &key);
3123 if (top_waiter) {
3124 struct futex_pi_state *pi_state = top_waiter->pi_state;
3125
3126 ret = -EINVAL;
3127 if (!pi_state)
3128 goto out_unlock;
3129
3130 /*
3131 * If current does not own the pi_state then the futex is
3132 * inconsistent and user space fiddled with the futex value.
3133 */
3134 if (pi_state->owner != current)
3135 goto out_unlock;
3136
3137 get_pi_state(pi_state);
3138 /*
3139 * By taking wait_lock while still holding hb->lock, we ensure
3140 * there is no point where we hold neither; and therefore
3141 * wake_futex_pi() must observe a state consistent with what we
3142 * observed.
3143 *
3144 		 * In particular; this forces __rt_mutex_start_proxy_lock() to
3145 * complete such that we're guaranteed to observe the
3146 * rt_waiter. Also see the WARN in wake_futex_pi().
3147 */
3148 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3149 spin_unlock(&hb->lock);
3150
3151 /* drops pi_state->pi_mutex.wait_lock */
3152 ret = wake_futex_pi(uaddr, uval, pi_state);
3153
3154 put_pi_state(pi_state);
3155
3156 /*
3157 * Success, we're done! No tricky corner cases.
3158 */
3159 if (!ret)
3160 goto out_putkey;
3161 /*
3162 * The atomic access to the futex value generated a
3163 * pagefault, so retry the user-access and the wakeup:
3164 */
3165 if (ret == -EFAULT)
3166 goto pi_faulted;
3167 /*
3168 		 * An unconditional UNLOCK_PI op raced against a waiter
3169 * setting the FUTEX_WAITERS bit. Try again.
3170 */
3171 if (ret == -EAGAIN)
3172 goto pi_retry;
3173 /*
3174 * wake_futex_pi has detected invalid state. Tell user
3175 * space.
3176 */
3177 goto out_putkey;
3178 }
3179
3180 /*
3181 * We have no kernel internal state, i.e. no waiters in the
3182 * kernel. Waiters which are about to queue themselves are stuck
3183 * on hb->lock. So we can safely ignore them. We do neither
3184 	 * preserve the WAITERS bit nor the OWNER_DIED one. We are the
3185 * owner.
3186 */
3187 if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) {
3188 spin_unlock(&hb->lock);
3189 switch (ret) {
3190 case -EFAULT:
3191 goto pi_faulted;
3192
3193 case -EAGAIN:
3194 goto pi_retry;
3195
3196 default:
3197 WARN_ON_ONCE(1);
3198 goto out_putkey;
3199 }
3200 }
3201
3202 /*
3203 * If uval has changed, let user space handle it.
3204 */
3205 ret = (curval == uval) ? 0 : -EAGAIN;
3206
3207 out_unlock:
3208 spin_unlock(&hb->lock);
3209 out_putkey:
3210 put_futex_key(&key);
3211 return ret;
3212
3213 pi_retry:
3214 put_futex_key(&key);
3215 cond_resched();
3216 goto retry;
3217
3218 pi_faulted:
3219 put_futex_key(&key);
3220
3221 ret = fault_in_user_writeable(uaddr);
3222 if (!ret)
3223 goto retry;
3224
3225 return ret;
3226 }
3227
3228 /**
3229 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
3230  * @hb:		the hash_bucket the futex_q was originally enqueued on
3231 * @q: the futex_q woken while waiting to be requeued
3232 * @key2: the futex_key of the requeue target futex
3233 * @timeout: the timeout associated with the wait (NULL if none)
3234 *
3235 * Detect if the task was woken on the initial futex as opposed to the requeue
3236 * target futex. If so, determine if it was a timeout or a signal that caused
3237 * the wakeup and return the appropriate error code to the caller. Must be
3238 * called with the hb lock held.
3239 *
3240 * Return:
3241 * - 0 = no early wakeup detected;
3242 * - <0 = -ETIMEDOUT or -ERESTARTNOINTR
3243 */
3244 static inline
3245 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
3246 struct futex_q *q, union futex_key *key2,
3247 struct hrtimer_sleeper *timeout)
3248 {
3249 int ret = 0;
3250
3251 /*
3252 * With the hb lock held, we avoid races while we process the wakeup.
3253 * We only need to hold hb (and not hb2) to ensure atomicity as the
3254 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
3255 * It can't be requeued from uaddr2 to something else since we don't
3256 * support a PI aware source futex for requeue.
3257 */
3258 if (!match_futex(&q->key, key2)) {
3259 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
3260 /*
3261 * We were woken prior to requeue by a timeout or a signal.
3262 * Unqueue the futex_q and determine which it was.
3263 */
3264 plist_del(&q->list, &hb->chain);
3265 hb_waiters_dec(hb);
3266
3267 /* Handle spurious wakeups gracefully */
3268 ret = -EWOULDBLOCK;
3269 if (timeout && !timeout->task)
3270 ret = -ETIMEDOUT;
3271 else if (signal_pending(current))
3272 ret = -ERESTARTNOINTR;
3273 }
3274 return ret;
3275 }
3276
3277 /**
3278 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
3279 * @uaddr: the futex we initially wait on (non-pi)
3280 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
3281 * the same type, no requeueing from private to shared, etc.
3282 * @val: the expected value of uaddr
3283 * @abs_time: absolute timeout
3284 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
3285 * @uaddr2: the pi futex we will take prior to returning to user-space
3286 *
3287 * The caller will wait on uaddr and will be requeued by futex_requeue() to
3288 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
3289 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
3290 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
3291 * without one, the pi logic would not know which task to boost/deboost, if
3292 * there was a need to.
3293 *
3294 * We call schedule in futex_wait_queue_me() when we enqueue and return there
3295 * via the following--
3296 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
3297 * 2) wakeup on uaddr2 after a requeue
3298 * 3) signal
3299 * 4) timeout
3300 *
3301 * If 3, cleanup and return -ERESTARTNOINTR.
3302 *
3303 * If 2, we may then block on trying to take the rt_mutex and return via:
3304 * 5) successful lock
3305 * 6) signal
3306 * 7) timeout
3307 * 8) other lock acquisition failure
3308 *
3309 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
3310 *
3311 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
3312 *
3313 * Return:
3314 * - 0 - On success;
3315 * - <0 - On error
3316 */
3317 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
3318 u32 val, ktime_t *abs_time, u32 bitset,
3319 u32 __user *uaddr2)
3320 {
3321 struct hrtimer_sleeper timeout, *to;
3322 struct rt_mutex_waiter rt_waiter;
3323 struct futex_hash_bucket *hb;
3324 union futex_key key2 = FUTEX_KEY_INIT;
3325 struct futex_q q = futex_q_init;
3326 int res, ret;
3327
3328 if (!IS_ENABLED(CONFIG_FUTEX_PI))
3329 return -ENOSYS;
3330
3331 if (uaddr == uaddr2)
3332 return -EINVAL;
3333
3334 if (!bitset)
3335 return -EINVAL;
3336
3337 to = futex_setup_timer(abs_time, &timeout, flags,
3338 current->timer_slack_ns);
3339
3340 /*
3341 * The waiter is allocated on our stack, manipulated by the requeue
3342 * code while we sleep on uaddr.
3343 */
3344 rt_mutex_init_waiter(&rt_waiter);
3345
3346 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
3347 if (unlikely(ret != 0))
3348 goto out;
3349
3350 q.bitset = bitset;
3351 q.rt_waiter = &rt_waiter;
3352 q.requeue_pi_key = &key2;
3353
3354 /*
3355 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
3356 * count.
3357 */
3358 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
3359 if (ret)
3360 goto out_key2;
3361
3362 /*
3363 * The check above which compares uaddrs is not sufficient for
3364 * shared futexes. We need to compare the keys:
3365 */
3366 if (match_futex(&q.key, &key2)) {
3367 queue_unlock(hb);
3368 ret = -EINVAL;
3369 goto out_put_keys;
3370 }
3371
3372 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
3373 futex_wait_queue_me(hb, &q, to);
3374
3375 spin_lock(&hb->lock);
3376 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
3377 spin_unlock(&hb->lock);
3378 if (ret)
3379 goto out_put_keys;
3380
3381 /*
3382 * In order for us to be here, we know our q.key == key2, and since
3383 * we took the hb->lock above, we also know that futex_requeue() has
3384 * completed and we no longer have to concern ourselves with a wakeup
3385 * race with the atomic proxy lock acquisition by the requeue code. The
3386 * futex_requeue dropped our key1 reference and incremented our key2
3387 * reference count.
3388 */
3389
3390 /* Check if the requeue code acquired the second futex for us. */
3391 if (!q.rt_waiter) {
3392 /*
3393 * Got the lock. We might not be the anticipated owner if we
3394 * did a lock-steal - fix up the PI-state in that case.
3395 */
3396 if (q.pi_state && (q.pi_state->owner != current)) {
3397 spin_lock(q.lock_ptr);
3398 ret = fixup_pi_state_owner(uaddr2, &q, current);
3399 /*
3400 * Drop the reference to the pi state which
3401 * the requeue_pi() code acquired for us.
3402 */
3403 put_pi_state(q.pi_state);
3404 spin_unlock(q.lock_ptr);
3405 /*
3406 * Adjust the return value. It's either -EFAULT or
3407 * success (1) but the caller expects 0 for success.
3408 */
3409 ret = ret < 0 ? ret : 0;
3410 }
3411 } else {
3412 struct rt_mutex *pi_mutex;
3413
3414 /*
3415 * We have been woken up by futex_unlock_pi(), a timeout, or a
3416 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
3417 * the pi_state.
3418 */
3419 WARN_ON(!q.pi_state);
3420 pi_mutex = &q.pi_state->pi_mutex;
3421 ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
3422
3423 spin_lock(q.lock_ptr);
3424 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
3425 ret = 0;
3426
3427 debug_rt_mutex_free_waiter(&rt_waiter);
3428 /*
3429 * Fixup the pi_state owner and possibly acquire the lock if we
3430 * haven't already.
3431 */
3432 res = fixup_owner(uaddr2, &q, !ret);
3433 /*
3434 		 * If fixup_owner() returned an error, propagate that. If it
3435 * acquired the lock, clear -ETIMEDOUT or -EINTR.
3436 */
3437 if (res)
3438 ret = (res < 0) ? res : 0;
3439
3440 /* Unqueue and drop the lock. */
3441 unqueue_me_pi(&q);
3442 }
3443
3444 if (ret == -EINTR) {
3445 /*
3446 * We've already been requeued, but cannot restart by calling
3447 * futex_lock_pi() directly. We could restart this syscall, but
3448 * it would detect that the user space "val" changed and return
3449 * -EWOULDBLOCK. Save the overhead of the restart and return
3450 * -EWOULDBLOCK directly.
3451 */
3452 ret = -EWOULDBLOCK;
3453 }
3454
3455 out_put_keys:
3456 put_futex_key(&q.key);
3457 out_key2:
3458 put_futex_key(&key2);
3459
3460 out:
3461 if (to) {
3462 hrtimer_cancel(&to->timer);
3463 destroy_hrtimer_on_stack(&to->timer);
3464 }
3465 return ret;
3466 }
3467
3468 /*
3469 * Support for robust futexes: the kernel cleans up held futexes at
3470 * thread exit time.
3471 *
3472 * Implementation: user-space maintains a per-thread list of locks it
3473 * is holding. Upon do_exit(), the kernel carefully walks this list,
3474 * and marks all locks that are owned by this thread with the
3475 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
3476 * always manipulated with the lock held, so the list is private and
3477 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3478 * field, to allow the kernel to clean up if the thread dies after
3479 * acquiring the lock, but just before it could have added itself to
3480 * the list. There can only be one such pending lock.
3481 */
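/*
 * The userspace side of this is the robust_list_head structure from the
 * futex UAPI header, registered once per thread via set_robust_list():
 *
 *	struct robust_list_head {
 *		struct robust_list list;	(circular list of held locks)
 *		long futex_offset;		(offset of the futex word
 *						 within each list entry)
 *		struct robust_list __user *list_op_pending;
 *	};
 *
 * The 'list_op_pending' field covers the window where a lock is being
 * acquired or released but is not yet on (or no longer on) the list.
 */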
3482
3483 /**
3484 * sys_set_robust_list() - Set the robust-futex list head of a task
3485 * @head: pointer to the list-head
3486 * @len: length of the list-head, as userspace expects
3487 */
3488 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3489 size_t, len)
3490 {
3491 if (!futex_cmpxchg_enabled)
3492 return -ENOSYS;
3493 /*
3494 * The kernel knows only one size for now:
3495 */
3496 if (unlikely(len != sizeof(*head)))
3497 return -EINVAL;
3498
3499 current->robust_list = head;
3500
3501 return 0;
3502 }
3503
3504 /**
3505 * sys_get_robust_list() - Get the robust-futex list head of a task
3506 * @pid: pid of the process [zero for current task]
3507 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3508 * @len_ptr: pointer to a length field, the kernel fills in the header size
3509 */
3510 SYSCALL_DEFINE3(get_robust_list, int, pid,
3511 struct robust_list_head __user * __user *, head_ptr,
3512 size_t __user *, len_ptr)
3513 {
3514 struct robust_list_head __user *head;
3515 unsigned long ret;
3516 struct task_struct *p;
3517
3518 if (!futex_cmpxchg_enabled)
3519 return -ENOSYS;
3520
3521 rcu_read_lock();
3522
3523 ret = -ESRCH;
3524 if (!pid)
3525 p = current;
3526 else {
3527 p = find_task_by_vpid(pid);
3528 if (!p)
3529 goto err_unlock;
3530 }
3531
3532 ret = -EPERM;
3533 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
3534 goto err_unlock;
3535
3536 head = p->robust_list;
3537 rcu_read_unlock();
3538
3539 if (put_user(sizeof(*head), len_ptr))
3540 return -EFAULT;
3541 return put_user(head, head_ptr);
3542
3543 err_unlock:
3544 rcu_read_unlock();
3545
3546 return ret;
3547 }
3548
3549 /* Constants for the pending_op argument of handle_futex_death */
3550 #define HANDLE_DEATH_PENDING true
3551 #define HANDLE_DEATH_LIST false
3552
3553 /*
3554 * Process a futex-list entry, check whether it's owned by the
3555 * dying task, and do notification if so:
3556 */
3557 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
3558 bool pi, bool pending_op)
3559 {
3560 u32 uval, nval, mval;
3561 int err;
3562
3563 	/* Futex address must be 32-bit aligned */
3564 if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
3565 return -1;
3566
3567 retry:
3568 if (get_user(uval, uaddr))
3569 return -1;
3570
3571 /*
3572 	 * Special case for regular (non-PI) futexes. The unlock path in
3573 * user space has two race scenarios:
3574 *
3575 * 1. The unlock path releases the user space futex value and
3576 * before it can execute the futex() syscall to wake up
3577 * waiters it is killed.
3578 *
3579 * 2. A woken up waiter is killed before it can acquire the
3580 * futex in user space.
3581 *
3582 	 * In both cases the TID validation below would prevent a wakeup
3583 	 * of potential waiters, which could leave these waiters blocked
3584 	 * forever.
3585 *
3586 * In both cases the following conditions are met:
3587 *
3588 * 1) task->robust_list->list_op_pending != NULL
3589 * @pending_op == true
3590 * 2) User space futex value == 0
3591 * 3) Regular futex: @pi == false
3592 *
3593 * If these conditions are met, it is safe to attempt waking up a
3594 * potential waiter without touching the user space futex value and
3595 * trying to set the OWNER_DIED bit. The user space futex value is
3596 * uncontended and the rest of the user space mutex state is
3597 * consistent, so a woken waiter will just take over the
3598 * uncontended futex. Setting the OWNER_DIED bit would create
3599 * inconsistent state and malfunction of the user space owner died
3600 * handling.
3601 */
3602 if (pending_op && !pi && !uval) {
3603 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3604 return 0;
3605 }
3606
3607 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
3608 return 0;
3609
3610 /*
3611 * Ok, this dying thread is truly holding a futex
3612 * of interest. Set the OWNER_DIED bit atomically
3613 * via cmpxchg, and if the value had FUTEX_WAITERS
3614 * set, wake up a waiter (if any). (We have to do a
3615 * futex_wake() even if OWNER_DIED is already set -
3616 * to handle the rare but possible case of recursive
3617 * thread-death.) The rest of the cleanup is done in
3618 * userspace.
3619 */
3620 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
3621
3622 /*
3623 * We are not holding a lock here, but we want to have
3624 * the pagefault_disable/enable() protection because
3625 * we want to handle the fault gracefully. If the
3626 * access fails we try to fault in the futex with R/W
3627 * verification via get_user_pages. get_user() above
3628 * does not guarantee R/W access. If that fails we
3629 * give up and leave the futex locked.
3630 */
3631 if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) {
3632 switch (err) {
3633 case -EFAULT:
3634 if (fault_in_user_writeable(uaddr))
3635 return -1;
3636 goto retry;
3637
3638 case -EAGAIN:
3639 cond_resched();
3640 goto retry;
3641
3642 default:
3643 WARN_ON_ONCE(1);
3644 return err;
3645 }
3646 }
3647
3648 if (nval != uval)
3649 goto retry;
3650
3651 /*
3652 * Wake robust non-PI futexes here. The wakeup of
3653 * PI futexes happens in exit_pi_state():
3654 */
3655 if (!pi && (uval & FUTEX_WAITERS))
3656 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
3657
3658 return 0;
3659 }
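
/*
 * For illustration (this is user-space policy, not enforced here): a
 * waiter woken by the OWNER_DIED handling above would typically finish
 * the hand-over roughly like this ('cmpxchg' stands for a user-space
 * atomic compare-exchange, 'my_tid' for the caller's TID):
 *
 *	val = *futex;
 *	if (val & FUTEX_OWNER_DIED) {
 *		// take over the dead owner's lock, keeping OWNER_DIED set
 *		if (cmpxchg(futex, val, my_tid | FUTEX_OWNER_DIED) == val)
 *			// owner died: recover the protected state, then
 *			// clear FUTEX_OWNER_DIED (glibc exposes this step
 *			// as pthread_mutex_consistent())
 *	}
 */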
3660
3661 /*
3662 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3663 */
3664 static inline int fetch_robust_entry(struct robust_list __user **entry,
3665 struct robust_list __user * __user *head,
3666 unsigned int *pi)
3667 {
3668 unsigned long uentry;
3669
3670 if (get_user(uentry, (unsigned long __user *)head))
3671 return -EFAULT;
3672
3673 *entry = (void __user *)(uentry & ~1UL);
3674 *pi = uentry & 1;
3675
3676 return 0;
3677 }
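
/*
 * E.g. (illustrative, mirroring what glibc does for PI robust mutexes):
 * user space tags a PI entry by setting bit 0 of the pointer it stores:
 *
 *	head.list_op_pending =
 *		(struct robust_list *)((unsigned long)&m->list | 1);
 */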
3678
3679 /*
3680 * Walk curr->robust_list (very carefully, it's a userspace list!)
3681 * and mark any locks found there dead, and notify any waiters.
3682 *
3683 * We silently return on any sign of list-walking problem.
3684 */
3685 static void exit_robust_list(struct task_struct *curr)
3686 {
3687 struct robust_list_head __user *head = curr->robust_list;
3688 struct robust_list __user *entry, *next_entry, *pending;
3689 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3690 unsigned int next_pi;
3691 unsigned long futex_offset;
3692 int rc;
3693
3694 if (!futex_cmpxchg_enabled)
3695 return;
3696
3697 /*
3698 * Fetch the list head (which was registered earlier, via
3699 * sys_set_robust_list()):
3700 */
3701 if (fetch_robust_entry(&entry, &head->list.next, &pi))
3702 return;
3703 /*
3704 * Fetch the relative futex offset:
3705 */
3706 if (get_user(futex_offset, &head->futex_offset))
3707 return;
3708 /*
3709 * Fetch any possibly pending lock-add first, and handle it
3710 * if it exists:
3711 */
3712 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
3713 return;
3714
3715 next_entry = NULL; /* avoid warning with gcc */
3716 while (entry != &head->list) {
3717 /*
3718 * Fetch the next entry in the list before calling
3719 * handle_futex_death:
3720 */
3721 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3722 /*
3723 * A pending lock might already be on the list, so
3724 * don't process it twice:
3725 */
3726 if (entry != pending) {
3727 if (handle_futex_death((void __user *)entry + futex_offset,
3728 curr, pi, HANDLE_DEATH_LIST))
3729 return;
3730 }
3731 if (rc)
3732 return;
3733 entry = next_entry;
3734 pi = next_pi;
3735 /*
3736 * Avoid excessively long or circular lists:
3737 */
3738 if (!--limit)
3739 break;
3740
3741 cond_resched();
3742 }
3743
3744 if (pending) {
3745 handle_futex_death((void __user *)pending + futex_offset,
3746 curr, pip, HANDLE_DEATH_PENDING);
3747 }
3748 }
3749
3750 static void futex_cleanup(struct task_struct *tsk)
3751 {
3752 if (unlikely(tsk->robust_list)) {
3753 exit_robust_list(tsk);
3754 tsk->robust_list = NULL;
3755 }
3756
3757 #ifdef CONFIG_COMPAT
3758 if (unlikely(tsk->compat_robust_list)) {
3759 compat_exit_robust_list(tsk);
3760 tsk->compat_robust_list = NULL;
3761 }
3762 #endif
3763
3764 if (unlikely(!list_empty(&tsk->pi_state_list)))
3765 exit_pi_state_list(tsk);
3766 }
3767
3768 /**
3769 * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
3770 * @tsk: task to set the state on
3771 *
3772 * Set the futex exit state of the task lockless. The futex waiter code
3773 * observes that state when a task is exiting and loops until the task has
3774 * actually finished the futex cleanup. The worst case for this is that the
3775 * waiter runs through the wait loop until the state becomes visible.
3776 *
3777 * This is called from the recursive fault handling path in do_exit().
3778 *
3779 * This is best effort. Either the futex exit code has run already or
3780 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
3781 * take it over. If not, the problem is pushed back to user space. If the
3782 * futex exit code did not run yet, then an already queued waiter might
3783 * block forever, but there is nothing which can be done about that.
3784 */
3785 void futex_exit_recursive(struct task_struct *tsk)
3786 {
3787 /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
3788 if (tsk->futex_state == FUTEX_STATE_EXITING)
3789 mutex_unlock(&tsk->futex_exit_mutex);
3790 tsk->futex_state = FUTEX_STATE_DEAD;
3791 }
3792
3793 static void futex_cleanup_begin(struct task_struct *tsk)
3794 {
3795 /*
3796 * Prevent various race issues against a concurrent incoming waiter
3797 * including live locks by forcing the waiter to block on
3798 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
3799 * attach_to_pi_owner().
3800 */
3801 mutex_lock(&tsk->futex_exit_mutex);
3802
3803 /*
3804 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
3805 *
3806 * This ensures that all subsequent checks of tsk->futex_state in
3807 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
3808 * tsk->pi_lock held.
3809 *
3810 * It guarantees also that a pi_state which was queued right before
3811 * the state change under tsk->pi_lock by a concurrent waiter must
3812 * be observed in exit_pi_state_list().
3813 */
3814 raw_spin_lock_irq(&tsk->pi_lock);
3815 tsk->futex_state = FUTEX_STATE_EXITING;
3816 raw_spin_unlock_irq(&tsk->pi_lock);
3817 }
3818
3819 static void futex_cleanup_end(struct task_struct *tsk, int state)
3820 {
3821 /*
3822 * Lockless store. The only side effect is that an observer might
3823 * take another loop until it becomes visible.
3824 */
3825 tsk->futex_state = state;
3826 /*
3827 * Drop the exit protection. This unblocks waiters which observed
3828 * FUTEX_STATE_EXITING to reevaluate the state.
3829 */
3830 mutex_unlock(&tsk->futex_exit_mutex);
3831 }
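
/*
 * Rough, simplified picture of the exit vs. waiter handshake that
 * futex_cleanup_begin()/futex_cleanup_end() implement (the waiter side
 * lives in attach_to_pi_owner() and its callers); this is only a sketch
 * of the ordering, not additional semantics:
 *
 *	exiting task				PI waiter
 *	------------				---------
 *	mutex_lock(futex_exit_mutex)
 *	futex_state = FUTEX_STATE_EXITING
 *						sees EXITING under pi_lock,
 *						drops its locks and blocks
 *						on futex_exit_mutex
 *	futex_cleanup()
 *	futex_state = DEAD (or OK on exec)
 *	mutex_unlock(futex_exit_mutex)
 *						wakes up, retries and now
 *						observes the cleaned up
 *						robust/PI state
 */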
3832
3833 void futex_exec_release(struct task_struct *tsk)
3834 {
3835 /*
3836 * The state handling is done for consistency, but in the case of
3837 	 * exec() there is no way to prevent further damage as the PID stays
3838 * the same. But for the unlikely and arguably buggy case that a
3839 * futex is held on exec(), this provides at least as much state
3840 	 * consistency protection as is possible.
3841 */
3842 futex_cleanup_begin(tsk);
3843 futex_cleanup(tsk);
3844 /*
3845 * Reset the state to FUTEX_STATE_OK. The task is alive and about
3846 	 * to exec a new binary.
3847 */
3848 futex_cleanup_end(tsk, FUTEX_STATE_OK);
3849 }
3850
3851 void futex_exit_release(struct task_struct *tsk)
3852 {
3853 futex_cleanup_begin(tsk);
3854 futex_cleanup(tsk);
3855 futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
3856 }
3857
3858 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
3859 u32 __user *uaddr2, u32 val2, u32 val3)
3860 {
3861 int cmd = op & FUTEX_CMD_MASK;
3862 unsigned int flags = 0;
3863
3864 if (!(op & FUTEX_PRIVATE_FLAG))
3865 flags |= FLAGS_SHARED;
3866
3867 if (op & FUTEX_CLOCK_REALTIME) {
3868 flags |= FLAGS_CLOCKRT;
3869 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
3870 return -ENOSYS;
3871 }
3872
3873 switch (cmd) {
3874 case FUTEX_LOCK_PI:
3875 case FUTEX_UNLOCK_PI:
3876 case FUTEX_TRYLOCK_PI:
3877 case FUTEX_WAIT_REQUEUE_PI:
3878 case FUTEX_CMP_REQUEUE_PI:
3879 if (!futex_cmpxchg_enabled)
3880 return -ENOSYS;
3881 }
3882
3883 switch (cmd) {
3884 case FUTEX_WAIT:
3885 val3 = FUTEX_BITSET_MATCH_ANY;
3886 /* fall through */
3887 case FUTEX_WAIT_BITSET:
3888 return futex_wait(uaddr, flags, val, timeout, val3);
3889 case FUTEX_WAKE:
3890 val3 = FUTEX_BITSET_MATCH_ANY;
3891 /* fall through */
3892 case FUTEX_WAKE_BITSET:
3893 return futex_wake(uaddr, flags, val, val3);
3894 case FUTEX_REQUEUE:
3895 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
3896 case FUTEX_CMP_REQUEUE:
3897 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
3898 case FUTEX_WAKE_OP:
3899 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
3900 case FUTEX_LOCK_PI:
3901 return futex_lock_pi(uaddr, flags, timeout, 0);
3902 case FUTEX_UNLOCK_PI:
3903 return futex_unlock_pi(uaddr, flags);
3904 case FUTEX_TRYLOCK_PI:
3905 return futex_lock_pi(uaddr, flags, NULL, 1);
3906 case FUTEX_WAIT_REQUEUE_PI:
3907 val3 = FUTEX_BITSET_MATCH_ANY;
3908 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3909 uaddr2);
3910 case FUTEX_CMP_REQUEUE_PI:
3911 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
3912 }
3913 return -ENOSYS;
3914 }
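
/*
 * For reference, a hedged user-space sketch of the two simplest commands
 * dispatched above ('futex_word', 'expected' and 'nr_wake' are
 * hypothetical names):
 *
 *	// block while *futex_word still equals 'expected'
 *	syscall(SYS_futex, futex_word, FUTEX_WAIT_PRIVATE, expected,
 *		NULL, NULL, 0);
 *
 *	// wake up to nr_wake tasks blocked on futex_word
 *	syscall(SYS_futex, futex_word, FUTEX_WAKE_PRIVATE, nr_wake,
 *		NULL, NULL, 0);
 *
 * The _PRIVATE variants set FUTEX_PRIVATE_FLAG, so the FLAGS_SHARED
 * handling above is skipped for them.
 */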
3915
3916
3917 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3918 struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
3919 u32, val3)
3920 {
3921 struct timespec64 ts;
3922 ktime_t t, *tp = NULL;
3923 u32 val2 = 0;
3924 int cmd = op & FUTEX_CMD_MASK;
3925
3926 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
3927 cmd == FUTEX_WAIT_BITSET ||
3928 cmd == FUTEX_WAIT_REQUEUE_PI)) {
3929 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3930 return -EFAULT;
3931 if (get_timespec64(&ts, utime))
3932 return -EFAULT;
3933 if (!timespec64_valid(&ts))
3934 return -EINVAL;
3935
3936 t = timespec64_to_ktime(ts);
3937 if (cmd == FUTEX_WAIT)
3938 t = ktime_add_safe(ktime_get(), t);
3939 tp = &t;
3940 }
3941 /*
3942 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
3943 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
3944 */
3945 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
3946 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
3947 val2 = (u32) (unsigned long) utime;
3948
3949 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
3950 }
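
/*
 * Note on the timeout handling above (illustrative; 'futex_word' and
 * 'expected' as in the sketch after do_futex()): FUTEX_WAIT takes a
 * *relative* timeout which is converted to an absolute expiry via
 * ktime_add_safe(), whereas FUTEX_WAIT_BITSET and FUTEX_WAIT_REQUEUE_PI
 * expect 'utime' to already be absolute. E.g.:
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	// waits at most ~1s from now
 *	syscall(SYS_futex, futex_word, FUTEX_WAIT, expected, &ts, NULL, 0);
 *
 *	// here 'ts' would have to be an absolute CLOCK_MONOTONIC time
 *	// (CLOCK_REALTIME if FUTEX_CLOCK_REALTIME is or'ed into the op)
 *	syscall(SYS_futex, futex_word, FUTEX_WAIT_BITSET, expected, &ts,
 *		NULL, FUTEX_BITSET_MATCH_ANY);
 */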
3951
3952 #ifdef CONFIG_COMPAT
3953 /*
3954 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3955 */
3956 static inline int
3957 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
3958 compat_uptr_t __user *head, unsigned int *pi)
3959 {
3960 if (get_user(*uentry, head))
3961 return -EFAULT;
3962
3963 *entry = compat_ptr((*uentry) & ~1);
3964 *pi = (unsigned int)(*uentry) & 1;
3965
3966 return 0;
3967 }
3968
3969 static void __user *futex_uaddr(struct robust_list __user *entry,
3970 compat_long_t futex_offset)
3971 {
3972 compat_uptr_t base = ptr_to_compat(entry);
3973 void __user *uaddr = compat_ptr(base + futex_offset);
3974
3975 return uaddr;
3976 }
3977
3978 /*
3979 * Walk curr->robust_list (very carefully, it's a userspace list!)
3980 * and mark any locks found there dead, and notify any waiters.
3981 *
3982 * We silently return on any sign of list-walking problem.
3983 */
3984 static void compat_exit_robust_list(struct task_struct *curr)
3985 {
3986 struct compat_robust_list_head __user *head = curr->compat_robust_list;
3987 struct robust_list __user *entry, *next_entry, *pending;
3988 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3989 unsigned int next_pi;
3990 compat_uptr_t uentry, next_uentry, upending;
3991 compat_long_t futex_offset;
3992 int rc;
3993
3994 if (!futex_cmpxchg_enabled)
3995 return;
3996
3997 /*
3998 * Fetch the list head (which was registered earlier, via
3999 * sys_set_robust_list()):
4000 */
4001 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
4002 return;
4003 /*
4004 * Fetch the relative futex offset:
4005 */
4006 if (get_user(futex_offset, &head->futex_offset))
4007 return;
4008 /*
4009 * Fetch any possibly pending lock-add first, and handle it
4010 * if it exists:
4011 */
4012 if (compat_fetch_robust_entry(&upending, &pending,
4013 &head->list_op_pending, &pip))
4014 return;
4015
4016 next_entry = NULL; /* avoid warning with gcc */
4017 while (entry != (struct robust_list __user *) &head->list) {
4018 /*
4019 * Fetch the next entry in the list before calling
4020 * handle_futex_death:
4021 */
4022 rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
4023 (compat_uptr_t __user *)&entry->next, &next_pi);
4024 /*
4025 * A pending lock might already be on the list, so
4026 		 * don't process it twice:
4027 */
4028 if (entry != pending) {
4029 void __user *uaddr = futex_uaddr(entry, futex_offset);
4030
4031 if (handle_futex_death(uaddr, curr, pi,
4032 HANDLE_DEATH_LIST))
4033 return;
4034 }
4035 if (rc)
4036 return;
4037 uentry = next_uentry;
4038 entry = next_entry;
4039 pi = next_pi;
4040 /*
4041 * Avoid excessively long or circular lists:
4042 */
4043 if (!--limit)
4044 break;
4045
4046 cond_resched();
4047 }
4048 if (pending) {
4049 void __user *uaddr = futex_uaddr(pending, futex_offset);
4050
4051 handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
4052 }
4053 }
4054
4055 COMPAT_SYSCALL_DEFINE2(set_robust_list,
4056 struct compat_robust_list_head __user *, head,
4057 compat_size_t, len)
4058 {
4059 if (!futex_cmpxchg_enabled)
4060 return -ENOSYS;
4061
4062 if (unlikely(len != sizeof(*head)))
4063 return -EINVAL;
4064
4065 current->compat_robust_list = head;
4066
4067 return 0;
4068 }
4069
4070 COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
4071 compat_uptr_t __user *, head_ptr,
4072 compat_size_t __user *, len_ptr)
4073 {
4074 struct compat_robust_list_head __user *head;
4075 unsigned long ret;
4076 struct task_struct *p;
4077
4078 if (!futex_cmpxchg_enabled)
4079 return -ENOSYS;
4080
4081 rcu_read_lock();
4082
4083 ret = -ESRCH;
4084 if (!pid)
4085 p = current;
4086 else {
4087 p = find_task_by_vpid(pid);
4088 if (!p)
4089 goto err_unlock;
4090 }
4091
4092 ret = -EPERM;
4093 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
4094 goto err_unlock;
4095
4096 head = p->compat_robust_list;
4097 rcu_read_unlock();
4098
4099 if (put_user(sizeof(*head), len_ptr))
4100 return -EFAULT;
4101 return put_user(ptr_to_compat(head), head_ptr);
4102
4103 err_unlock:
4104 rcu_read_unlock();
4105
4106 return ret;
4107 }
4108 #endif /* CONFIG_COMPAT */
4109
4110 #ifdef CONFIG_COMPAT_32BIT_TIME
4111 SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
4112 struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
4113 u32, val3)
4114 {
4115 struct timespec64 ts;
4116 ktime_t t, *tp = NULL;
4117 int val2 = 0;
4118 int cmd = op & FUTEX_CMD_MASK;
4119
4120 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
4121 cmd == FUTEX_WAIT_BITSET ||
4122 cmd == FUTEX_WAIT_REQUEUE_PI)) {
4123 if (get_old_timespec32(&ts, utime))
4124 return -EFAULT;
4125 if (!timespec64_valid(&ts))
4126 return -EINVAL;
4127
4128 t = timespec64_to_ktime(ts);
4129 if (cmd == FUTEX_WAIT)
4130 t = ktime_add_safe(ktime_get(), t);
4131 tp = &t;
4132 }
4133 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
4134 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
4135 val2 = (int) (unsigned long) utime;
4136
4137 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
4138 }
4139 #endif /* CONFIG_COMPAT_32BIT_TIME */
4140
4141 static void __init futex_detect_cmpxchg(void)
4142 {
4143 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
4144 u32 curval;
4145
4146 /*
4147 * This will fail and we want it. Some arch implementations do
4148 * runtime detection of the futex_atomic_cmpxchg_inatomic()
4149 * functionality. We want to know that before we call in any
4150 * of the complex code paths. Also we want to prevent
4151 * registration of robust lists in that case. NULL is
4152 	 * guaranteed to fault and we get -EFAULT on a functional
4153 	 * implementation; the non-functional ones will return
4154 * -ENOSYS.
4155 */
4156 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
4157 futex_cmpxchg_enabled = 1;
4158 #endif
4159 }
4160
4161 static int __init futex_init(void)
4162 {
4163 unsigned int futex_shift;
4164 unsigned long i;
4165
4166 #if CONFIG_BASE_SMALL
4167 futex_hashsize = 16;
4168 #else
4169 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
4170 #endif
4171
4172 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
4173 futex_hashsize, 0,
4174 futex_hashsize < 256 ? HASH_SMALL : 0,
4175 &futex_shift, NULL,
4176 futex_hashsize, futex_hashsize);
4177 futex_hashsize = 1UL << futex_shift;
4178
4179 futex_detect_cmpxchg();
4180
4181 for (i = 0; i < futex_hashsize; i++) {
4182 atomic_set(&futex_queues[i].waiters, 0);
4183 plist_head_init(&futex_queues[i].chain);
4184 spin_lock_init(&futex_queues[i].lock);
4185 }
4186
4187 return 0;
4188 }
4189 core_initcall(futex_init);
4190