// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"
#include <trace/hooks/futex.h>

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)

/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#endif /* CONFIG_FAIL_FUTEX */

/**
 * futex_hash - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
struct futex_hash_bucket *futex_hash(union futex_key *key)
{
	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
			  key->both.offset);

	return &futex_queues[hash & (futex_hashsize - 1)];
}
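
/*
 * Illustrative example (comment only, the address is made up): a private
 * futex at userspace address 0x7f0000001004 yields
 * key->private.address = 0x7f0000001000 and key->both.offset = 0x4.
 * jhash2() mixes the key words with the offset as initval; because
 * futex_hashsize is always a power of two, masking with
 * (futex_hashsize - 1) selects the bucket, e.g. hash 0x12345678 with
 * 4096 buckets maps to index 0x678.
 */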

/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}
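
/*
 * Usage sketch (illustrative, mirroring the futex_wait() path):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...
 *	if (to) {
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *		...
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 *
 * A NULL @time yields a NULL sleeper, i.e. "wait forever".
 */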

/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that futex_match() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that false-negatives
 * are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_add_return(1, &i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
		if (old)
			return old;
		return new;
	}
}
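
/*
 * Example of the race the cmpxchg above resolves (illustrative): two
 * tasks on different CPUs both read i_sequence == 0 for the same inode
 * and both draw a fresh number from i_seq, say 5 and 6. Only one
 * cmpxchg installs its value; the loser sees the winner's non-zero
 * number as 'old' and returns that instead, so every task observes the
 * same identifier for as long as the inode stays in memory.
 */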

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (when @fshared), the key is:
 *
 *   ( inode->i_sequence, page->index, offset_within_page )
 *
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !@fshared), the key is:
 *
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
		  enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		/*
		 * On no-MMU, shared futexes are treated as private, therefore
		 * we must not include the current process in the key. Since
		 * there is only one address space, the address is a unique key
		 * on its own.
		 */
		if (IS_ENABLED(CONFIG_MMU))
			key->private.mm = mm;
		else
			key->private.mm = NULL;

		key->private.address = address;
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_inode_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(true)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.i_seq = get_inode_sequence_number(inode);
		key->shared.pgoff = page_to_pgoff(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}
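
/*
 * Illustration of the resulting keys (comment only): two processes that
 * map the same file MAP_SHARED and wait on the same u32 derive identical
 * { i_seq, pgoff, offset | FUT_OFF_INODE } keys and therefore
 * futex_match(). An anonymous page reached via this shared path instead
 * keys on { mm, address, offset | FUT_OFF_MMSHARED }, so it can only
 * ever match within the same process.
 */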

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_read_lock(mm);
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(mm);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (futex_match(&this->key, key))
			return this;
	}
	return NULL;
}

int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

int futex_get_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
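
/*
 * Both helpers above run with page faults disabled and never sleep,
 * which makes them safe to call under hb->lock; a fault surfaces as an
 * error instead of being handled. Caller pattern sketch (illustrative,
 * mirroring futex_wait_setup()):
 *
 *	if (futex_get_value_locked(&uval, uaddr)) {
 *		futex_q_unlock(hb);		(drop the bucket lock first)
 *		if (get_user(uval, uaddr))	(fault the page in, may sleep)
 *			return -EFAULT;
 *		goto retry;			(then redo the locked read)
 *	}
 */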

/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret:	owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
	 * already. Highly unlikely and not a problem. Just one more round
	 * through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}

/**
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
void __futex_unqueue(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);
}

/* The key must be already stored in q->key. */
struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = futex_hash(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all futex_q_lock()
	 * users end up calling futex_queue(). Similarly, for housekeeping,
	 * decrement the counter at futex_q_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}
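
/*
 * Pairing sketch (illustrative): every futex_q_lock() is balanced by
 * exactly one of the following:
 *
 *	hb = futex_q_lock(q);		(waiter count bumped, lock taken)
 *	if (error)
 *		futex_q_unlock(hb);	(count decremented, lock dropped)
 *	else
 *		futex_queue(q, hb);	(enqueued on the plist, lock dropped)
 *
 * so the waiter count incremented above is always accounted for.
 */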

void futex_q_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	futex_hb_waiters_dec(hb);
}

void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;
	bool already_on_hb = false;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 *   (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	trace_android_vh_alter_futex_plist_add(&q->list, &hb->chain, &already_on_hb);
	if (!already_on_hb)
		plist_add(&q->list, &hb->chain);
	q->task = current;
}
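
/*
 * Example ordering (illustrative): a SCHED_FIFO waiter at rt_priority 90
 * has normal_prio 9 (a lower value means a higher plist priority) and is
 * woken before one at rt_priority 50 (normal_prio 49); all SCHED_OTHER
 * waiters are clamped to MAX_RT_PRIO and are woken after the RT ones,
 * FIFO among themselves.
 */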

/**
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
 * be paired with exactly one earlier call to futex_queue().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
int futex_unqueue(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following spin_lock.
	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
	 * optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__futex_unqueue(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
 */
void futex_unqueue_pi(struct futex_q *q)
{
	__futex_unqueue(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, nval, mval;
	pid_t owner;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In the second case, the wake up notification could be generated
	 * by the unlock path in user space after setting the futex value
	 * to zero or by the kernel after setting the OWNER_DIED bit below.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) The owner part of user space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. If the futex value is zero,
	 * the rest of the user space mutex state is consistent, so a woken
	 * waiter will just take over the uncontended futex. Setting the
	 * OWNER_DIED bit would create inconsistent state and malfunction
	 * of the user space owner died handling. Otherwise, the OWNER_DIED
	 * bit is already set, and the woken waiter is expected to deal with
	 * this.
	 */
	owner = uval & FUTEX_TID_MASK;

	if (pending_op && !pi && !owner) {
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	if (owner != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS))
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);

	return 0;
}
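
/*
 * Value-layout example for the path above (illustrative): if the dead
 * owner's TID is 1234 and a waiter is queued, uval reads as
 * (FUTEX_WAITERS | 1234). The cmpxchg replaces it with
 * (FUTEX_WAITERS | FUTEX_OWNER_DIED), clearing the TID bits, and the
 * subsequent futex_wake() lets a waiter take over the lock and observe
 * OWNER_DIED.
 */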

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}
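
/*
 * Tag-encoding example (illustrative, made-up address): a robust-list
 * entry at userspace address 0x7f00dead0020 that guards a PI futex is
 * stored as 0x7f00dead0021; the fetch above yields
 * *entry == 0x7f00dead0020 and *pi == 1. Natural alignment of the list
 * nodes guarantees that bit 0 is free for the tag.
 */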

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	unsigned long futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}
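
/*
 * Userspace layout this walk assumes (sketch; the struct name is
 * illustrative, the real registration is done by the C library via
 * sys_set_robust_list()):
 *
 *	struct user_mutex {
 *		u32 futex;
 *		struct robust_list list;	(linked from head->list)
 *	};
 *
 * head->futex_offset holds the (possibly negative) distance from a list
 * node to its futex word, so 'entry + futex_offset' above recovers the
 * futex, and head->list_op_pending points at a lock whose acquire or
 * release was in flight when the task died.
 */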

#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
			  compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
				      &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
					       (compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}
#endif

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = futex_hash(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case, drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif

static void futex_cleanup(struct task_struct *tsk)
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in make_task_dead().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}

static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It also guarantees that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}

static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}

void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
	 * exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}

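/*
 * Sizing example for the boot-time hash below (illustrative): with 16
 * possible CPUs, the default case requests 256 * 16 = 4096 buckets,
 * already a power of two, so alloc_large_system_hash() reports
 * futex_shift = 12 and futex_hashsize stays 4096. Keeping the size a
 * power of two is what lets futex_hash() mask with (futex_hashsize - 1)
 * instead of using a modulo.
 */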
static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);