1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Fast Userspace Mutexes (which I call "Futexes!").
4 * (C) Rusty Russell, IBM 2002
5 *
6 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
7 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
8 *
9 * Removed page pinning, fix privately mapped COW pages and other cleanups
10 * (C) Copyright 2003, 2004 Jamie Lokier
11 *
12 * Robust futex support started by Ingo Molnar
13 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
14 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
15 *
16 * PI-futex support started by Ingo Molnar and Thomas Gleixner
17 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
18 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
19 *
20 * PRIVATE futexes by Eric Dumazet
21 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
22 *
23 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
24 * Copyright (C) IBM Corporation, 2009
25 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
26 *
27 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
28 * enough at me, Linus for the original (flawed) idea, Matthew
29 * Kirkwood for proof-of-concept implementation.
30 *
31 * "The futexes are also cursed."
32 * "But they come in a choice of three flavours!"
33 */
34 #include <linux/compat.h>
35 #include <linux/jhash.h>
36 #include <linux/pagemap.h>
37 #include <linux/debugfs.h>
38 #include <linux/plist.h>
39 #include <linux/memblock.h>
40 #include <linux/fault-inject.h>
41 #include <linux/slab.h>
42
43 #include "futex.h"
44 #include "../locking/rtmutex_common.h"
45 #include <trace/hooks/futex.h>
46
47 /*
48 * The base of the bucket array and its size are always used together
49 * (after initialization only in futex_hash()), so ensure that they
50 * reside in the same cacheline.
51 */
52 static struct {
53 struct futex_hash_bucket *queues;
54 unsigned long hashsize;
55 } __futex_data __read_mostly __aligned(2*sizeof(long));
56 #define futex_queues (__futex_data.queues)
57 #define futex_hashsize (__futex_data.hashsize)
58
59
60 /*
61 * Fault injections for futexes.
62 */
63 #ifdef CONFIG_FAIL_FUTEX
64
65 static struct {
66 struct fault_attr attr;
67
68 bool ignore_private;
69 } fail_futex = {
70 .attr = FAULT_ATTR_INITIALIZER,
71 .ignore_private = false,
72 };
73
74 static int __init setup_fail_futex(char *str)
75 {
76 return setup_fault_attr(&fail_futex.attr, str);
77 }
78 __setup("fail_futex=", setup_fail_futex);
79
80 bool should_fail_futex(bool fshared)
81 {
82 if (fail_futex.ignore_private && !fshared)
83 return false;
84
85 return should_fail(&fail_futex.attr, 1);
86 }
87
88 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
89
90 static int __init fail_futex_debugfs(void)
91 {
92 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
93 struct dentry *dir;
94
95 dir = fault_create_debugfs_attr("fail_futex", NULL,
96 &fail_futex.attr);
97 if (IS_ERR(dir))
98 return PTR_ERR(dir);
99
100 debugfs_create_bool("ignore-private", mode, dir,
101 &fail_futex.ignore_private);
102 return 0;
103 }
104
105 late_initcall(fail_futex_debugfs);
106
107 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
108
109 #endif /* CONFIG_FAIL_FUTEX */
110
111 /**
112 * futex_hash - Return the hash bucket in the global hash
113 * @key: Pointer to the futex key for which the hash is calculated
114 *
115 * We hash on the keys returned from get_futex_key (see below) and return the
116 * corresponding hash bucket in the global hash.
117 */
118 struct futex_hash_bucket *futex_hash(union futex_key *key)
119 {
120 u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
121 key->both.offset);
122
123 return &futex_queues[hash & (futex_hashsize - 1)];
124 }
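
/*
 * Illustrative sketch (not part of the original file): the bucket index is
 * "hash & (size - 1)", which only works because futex_hashsize is always a
 * power of two (see futex_init() below). A stand-alone analogue using a
 * hypothetical fixed-size table would be:
 *
 *	static struct futex_hash_bucket demo_table[1024];	// power of two
 *
 *	static struct futex_hash_bucket *demo_pick_bucket(union futex_key *key)
 *	{
 *		u32 hash = jhash2((u32 *)key,
 *				  offsetof(typeof(*key), both.offset) / 4,
 *				  key->both.offset);	// same inputs as futex_hash()
 *
 *		return &demo_table[hash & (ARRAY_SIZE(demo_table) - 1)];
 *	}
 */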
125
126
127 /**
128 * futex_setup_timer - set up the sleeping hrtimer.
129 * @time: ptr to the given timeout value
130 * @timeout: the hrtimer_sleeper structure to be set up
131 * @flags: futex flags
132 * @range_ns: optional range in ns
133 *
134 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
135 * value given
136 */
137 struct hrtimer_sleeper *
138 futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
139 int flags, u64 range_ns)
140 {
141 if (!time)
142 return NULL;
143
144 hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
145 CLOCK_REALTIME : CLOCK_MONOTONIC,
146 HRTIMER_MODE_ABS);
147 /*
148 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
149 * effectively the same as calling hrtimer_set_expires().
150 */
151 hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
152
153 return timeout;
154 }
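
/*
 * Rough usage sketch (condensed from the futex_wait()-style callers, not a
 * verbatim copy): the returned pointer doubles as the "is there a timeout at
 * all" flag, so both arming and cleanup are conditional on it.
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...
 *	if (to)
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *	// sleep; a NULL to->task afterwards means the timer fired
 *	...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */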
155
156 /*
157 * Generate a machine wide unique identifier for this inode.
158 *
159 * This relies on u64 not wrapping in the lifetime of the machine, which with
160 * 1ns resolution means almost 585 years.
161 *
162 * This further relies on the fact that a well formed program will not unmap
163 * the file while it has a (shared) futex waiting on it. This mapping will have
164 * a file reference which pins the mount and inode.
165 *
166 * If for some reason an inode gets evicted and read back in again, it will get
167 * a new sequence number and will _NOT_ match, even though it is the exact same
168 * file.
169 *
170 * It is important that futex_match() never has a false positive, especially
171 * for PI futexes, where that could mess up the state. The above argues that
172 * false negatives are only possible for malformed programs.
173 */
174 static u64 get_inode_sequence_number(struct inode *inode)
175 {
176 static atomic64_t i_seq;
177 u64 old;
178
179 /* Does the inode already have a sequence number? */
180 old = atomic64_read(&inode->i_sequence);
181 if (likely(old))
182 return old;
183
184 for (;;) {
185 u64 new = atomic64_add_return(1, &i_seq);
186 if (WARN_ON_ONCE(!new))
187 continue;
188
189 old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
190 if (old)
191 return old;
192 return new;
193 }
194 }
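
/*
 * The function above is an instance of a generic "assign an ID at most once,
 * without locks" idiom. A stand-alone sketch of the same idea (hypothetical
 * names, not from this file):
 *
 *	static atomic64_t demo_next_id;
 *
 *	static u64 demo_get_id(atomic64_t *slot)
 *	{
 *		u64 old = atomic64_read(slot);
 *
 *		if (old)				// fast path: already assigned
 *			return old;
 *
 *		for (;;) {
 *			u64 new = atomic64_add_return(1, &demo_next_id);
 *
 *			if (!new)			// 0 means "unassigned", skip it
 *				continue;
 *			// Only one CPU wins the 0 -> new transition; every loser
 *			// returns the winner's value, so the ID is stable.
 *			old = atomic64_cmpxchg_relaxed(slot, 0, new);
 *			return old ? old : new;
 *		}
 *	}
 */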
195
196 /**
197 * get_futex_key() - Get parameters which are the keys for a futex
198 * @uaddr: virtual address of the futex
199 * @flags: FLAGS_*
200 * @key: address where result is stored.
201 * @rw: mapping needs to be read/write (values: FUTEX_READ,
202 * FUTEX_WRITE)
203 *
204 * Return: a negative error code or 0
205 *
206 * The key words are stored in @key on success.
207 *
208 * For shared mappings (when @fshared), the key is:
209 *
210 * ( inode->i_sequence, page->index, offset_within_page )
211 *
212 * [ also see get_inode_sequence_number() ]
213 *
214 * For private mappings (or when !@fshared), the key is:
215 *
216 * ( current->mm, address, 0 )
217 *
218 * This allows (cross process, where applicable) identification of the futex
219 * without keeping the page pinned for the duration of the FUTEX_WAIT.
220 *
221 * lock_page() might sleep, the caller should not hold a spinlock.
222 */
223 int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
224 enum futex_access rw)
225 {
226 unsigned long address = (unsigned long)uaddr;
227 struct mm_struct *mm = current->mm;
228 struct page *page;
229 struct folio *folio;
230 struct address_space *mapping;
231 int err, ro = 0;
232 bool fshared;
233
234 fshared = flags & FLAGS_SHARED;
235
236 /*
237 * The futex address must be "naturally" aligned.
238 */
239 key->both.offset = address % PAGE_SIZE;
240 if (unlikely((address % sizeof(u32)) != 0))
241 return -EINVAL;
242 address -= key->both.offset;
243
244 if (unlikely(!access_ok(uaddr, sizeof(u32))))
245 return -EFAULT;
246
247 if (unlikely(should_fail_futex(fshared)))
248 return -EFAULT;
249
250 /*
251 * PROCESS_PRIVATE futexes are fast.
252 * As the mm cannot disappear under us and the 'key' only needs
253 * virtual address, we don't even have to find the underlying vma.
254 * Note: We do have to check that 'uaddr' is a valid user address,
255 * but access_ok() should be faster than find_vma()
256 */
257 if (!fshared) {
258 /*
259 * On no-MMU, shared futexes are treated as private, therefore
260 * we must not include the current process in the key. Since
261 * there is only one address space, the address is a unique key
262 * on its own.
263 */
264 if (IS_ENABLED(CONFIG_MMU))
265 key->private.mm = mm;
266 else
267 key->private.mm = NULL;
268
269 key->private.address = address;
270 return 0;
271 }
272
273 again:
274 /* Ignore any VERIFY_READ mapping (futex common case) */
275 if (unlikely(should_fail_futex(true)))
276 return -EFAULT;
277
278 err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
279 /*
280 * If write access is not required (eg. FUTEX_WAIT), try
281 * and get read-only access.
282 */
283 if (err == -EFAULT && rw == FUTEX_READ) {
284 err = get_user_pages_fast(address, 1, 0, &page);
285 ro = 1;
286 }
287 if (err < 0)
288 return err;
289 else
290 err = 0;
291
292 /*
293 * The treatment of mapping from this point on is critical. The folio
294 * lock protects many things but in this context the folio lock
295 * stabilizes mapping, prevents inode freeing in the shared
296 * file-backed region case and guards against movement to swap cache.
297 *
298 * Strictly speaking the folio lock is not needed in all cases being
299 * considered here and the folio lock forces unnecessary serialization.
300 * From this point on, mapping will be re-verified if necessary and
301 * folio lock will be acquired only if it is unavoidable.
302 *
303 * Mapping checks require the folio so it is looked up now. For
304 * anonymous pages, it does not matter if the folio is split
305 * in the future as the key is based on the address. For
306 * filesystem-backed pages, the precise page is required as the
307 * index of the page determines the key.
308 */
309 folio = page_folio(page);
310 mapping = READ_ONCE(folio->mapping);
311
312 /*
313 * If folio->mapping is NULL, then it cannot be an anonymous
314 * page; but it might be the ZERO_PAGE or in the gate area or
315 * in a special mapping (all cases which we are happy to fail);
316 * or it may have been a good file page when get_user_pages_fast
317 * found it, but truncated or holepunched or subjected to
318 * invalidate_complete_page2 before we got the folio lock (also
319 * cases which we are happy to fail). And we hold a reference,
320 * so refcount care in invalidate_inode_page's remove_mapping
321 * prevents drop_caches from setting mapping to NULL beneath us.
322 *
323 * The case we do have to guard against is when memory pressure made
324 * shmem_writepage move it from filecache to swapcache beneath us:
325 * an unlikely race, but we do need to retry for folio->mapping.
326 */
327 if (unlikely(!mapping)) {
328 int shmem_swizzled;
329
330 /*
331 * Folio lock is required to identify which special case above
332 * applies. If this is really a shmem page then the folio lock
333 * will prevent unexpected transitions.
334 */
335 folio_lock(folio);
336 shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
337 folio_unlock(folio);
338 folio_put(folio);
339
340 if (shmem_swizzled)
341 goto again;
342
343 return -EFAULT;
344 }
345
346 /*
347 * Private mappings are handled in a simple way.
348 *
349 * If the futex key is stored in anonymous memory, then the associated
350 * object is the mm which is implicitly pinned by the calling process.
351 *
352 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
353 * it's a read-only handle, it's expected that futexes attach to
354 * the object not the particular process.
355 */
356 if (folio_test_anon(folio)) {
357 /*
358 * A RO anonymous page will never change and thus doesn't make
359 * sense for futex operations.
360 */
361 if (unlikely(should_fail_futex(true)) || ro) {
362 err = -EFAULT;
363 goto out;
364 }
365
366 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
367 key->private.mm = mm;
368 key->private.address = address;
369
370 } else {
371 struct inode *inode;
372
373 /*
374 * The associated futex object in this case is the inode and
375 * the folio->mapping must be traversed. Ordinarily this should
376 * be stabilised under folio lock but it's not strictly
377 * necessary in this case as we just want to pin the inode, not
378 * update i_pages or anything like that.
379 *
380 * The RCU read lock is taken as the inode is finally freed
381 * under RCU. If the mapping still matches expectations then the
382 * mapping->host can be safely accessed as being a valid inode.
383 */
384 rcu_read_lock();
385
386 if (READ_ONCE(folio->mapping) != mapping) {
387 rcu_read_unlock();
388 folio_put(folio);
389
390 goto again;
391 }
392
393 inode = READ_ONCE(mapping->host);
394 if (!inode) {
395 rcu_read_unlock();
396 folio_put(folio);
397
398 goto again;
399 }
400
401 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
402 key->shared.i_seq = get_inode_sequence_number(inode);
403 key->shared.pgoff = folio->index + folio_page_idx(folio, page);
404 rcu_read_unlock();
405 }
406
407 out:
408 folio_put(folio);
409 return err;
410 }
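
/*
 * Userspace view of the two key flavours computed above (illustrative only;
 * error handling omitted and the helper name is made up - glibc has no
 * futex wrapper, so a raw syscall is used):
 *
 *	#include <linux/futex.h>
 *	#include <stddef.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long demo_futex_wait(uint32_t *uaddr, uint32_t expected, int shared)
 *	{
 *		int op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;
 *
 *		// FUTEX_WAIT_PRIVATE takes the fast path above: the key is just
 *		// (current->mm, address). Plain FUTEX_WAIT on an address inside
 *		// a MAP_SHARED file mapping is keyed on (inode sequence, page
 *		// index, offset), so other processes mapping the same file end
 *		// up in the same hash bucket.
 *		return syscall(SYS_futex, uaddr, op, expected, NULL, NULL, 0);
 *	}
 */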
411
412 /**
413 * fault_in_user_writeable() - Fault in user address and verify RW access
414 * @uaddr: pointer to faulting user space address
415 *
416 * Slow path to fixup the fault we just took in the atomic write
417 * access to @uaddr.
418 *
419 * We have no generic implementation of a non-destructive write to the
420 * user address. We know that we faulted in the atomic pagefault
421 * disabled section so we can as well avoid the #PF overhead by
422 * calling get_user_pages() right away.
423 */
424 int fault_in_user_writeable(u32 __user *uaddr)
425 {
426 struct mm_struct *mm = current->mm;
427 int ret;
428
429 mmap_read_lock(mm);
430 ret = fixup_user_fault(mm, (unsigned long)uaddr,
431 FAULT_FLAG_WRITE, NULL);
432 mmap_read_unlock(mm);
433
434 return ret < 0 ? ret : 0;
435 }
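
/*
 * Typical caller pattern (condensed sketch, not a verbatim copy of any one
 * caller): the fast path runs with pagefaults disabled while holding the
 * hash bucket lock, and only on -EFAULT do we drop the lock, take the slow
 * path above and retry.
 *
 *	retry:
 *		spin_lock(&hb->lock);
 *		ret = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval);
 *		if (ret == -EFAULT) {
 *			spin_unlock(&hb->lock);
 *			if (fault_in_user_writeable(uaddr))
 *				return -EFAULT;	// genuinely bad address, give up
 *			goto retry;		// a writable page is mapped now
 *		}
 */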
436
437 /**
438 * futex_top_waiter() - Return the highest priority waiter on a futex
439 * @hb: the hash bucket the futex_q's reside in
440 * @key: the futex key (to distinguish it from other futex futex_q's)
441 *
442 * Must be called with the hb lock held.
443 */
444 struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
445 {
446 struct futex_q *this;
447
448 plist_for_each_entry(this, &hb->chain, list) {
449 if (futex_match(&this->key, key))
450 return this;
451 }
452 return NULL;
453 }
454
455 int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
456 {
457 int ret;
458
459 pagefault_disable();
460 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
461 pagefault_enable();
462
463 return ret;
464 }
465
466 int futex_get_value_locked(u32 *dest, u32 __user *from)
467 {
468 int ret;
469
470 pagefault_disable();
471 ret = __get_user(*dest, from);
472 pagefault_enable();
473
474 return ret ? -EFAULT : 0;
475 }
476
477 /**
478 * wait_for_owner_exiting - Block until the owner has exited
479 * @ret: owner's current futex lock status
480 * @exiting: Pointer to the exiting task
481 *
482 * Caller must hold a refcount on @exiting.
483 */
484 void wait_for_owner_exiting(int ret, struct task_struct *exiting)
485 {
486 if (ret != -EBUSY) {
487 WARN_ON_ONCE(exiting);
488 return;
489 }
490
491 if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
492 return;
493
494 mutex_lock(&exiting->futex_exit_mutex);
495 /*
496 * No point in doing state checking here. If the waiter got here
497 * while the task was in exec()->exec_futex_release() then it can
498 * have any FUTEX_STATE_* value when the waiter has acquired the
499 * mutex. OK, if running, EXITING or DEAD if it reached exit()
500 * already. Highly unlikely and not a problem. Just one more round
501 * through the futex maze.
502 */
503 mutex_unlock(&exiting->futex_exit_mutex);
504
505 put_task_struct(exiting);
506 }
507
508 /**
509 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
510 * @q: The futex_q to unqueue
511 *
512 * The q->lock_ptr must not be NULL and must be held by the caller.
513 */
514 void __futex_unqueue(struct futex_q *q)
515 {
516 struct futex_hash_bucket *hb;
517
518 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
519 return;
520 lockdep_assert_held(q->lock_ptr);
521
522 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
523 plist_del(&q->list, &hb->chain);
524 futex_hb_waiters_dec(hb);
525 }
526
527 /* The key must be already stored in q->key. */
528 struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
529 __acquires(&hb->lock)
530 {
531 struct futex_hash_bucket *hb;
532
533 hb = futex_hash(&q->key);
534
535 /*
536 * Increment the counter before taking the lock so that
537 * a potential waker won't miss a to-be-slept task that is
538 * waiting for the spinlock. This is safe as all futex_q_lock()
539 * users end up calling futex_queue(). Similarly, for housekeeping,
540 * decrement the counter at futex_q_unlock() when some error has
541 * occurred and we don't end up adding the task to the list.
542 */
543 futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */
544
545 q->lock_ptr = &hb->lock;
546
547 spin_lock(&hb->lock);
548 return hb;
549 }
550
551 void futex_q_unlock(struct futex_hash_bucket *hb)
552 __releases(&hb->lock)
553 {
554 spin_unlock(&hb->lock);
555 futex_hb_waiters_dec(hb);
556 }
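
/*
 * The counter incremented in futex_q_lock() pairs with the waker side. A
 * condensed sketch of what futex_wake()-style code does before taking the
 * bucket lock (simplified, not a verbatim copy):
 *
 *	hb = futex_hash(&key);
 *	if (!futex_hb_waiters_pending(hb))	// reads the counter; pairs with (A)
 *		return ret;			// nobody queued nor about to queue
 *	spin_lock(&hb->lock);
 *	... walk hb->chain and wake the waiters whose key matches ...
 */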
557
558 void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
559 struct task_struct *task)
560 {
561 int prio;
562 bool already_on_hb = false;
563
564 /*
565 * The priority used to register this element is
566 * - either the real thread-priority for the real-time threads
567 * (i.e. threads with a priority lower than MAX_RT_PRIO)
568 * - or MAX_RT_PRIO for non-RT threads.
569 * Thus, all RT-threads are woken first in priority order, and
570 * the others are woken last, in FIFO order.
571 */
572 prio = min(current->normal_prio, MAX_RT_PRIO);
573
574 plist_node_init(&q->list, prio);
575 trace_android_vh_alter_futex_plist_add(&q->list, &hb->chain, &already_on_hb);
576 if (!already_on_hb)
577 plist_add(&q->list, &hb->chain);
578 q->task = task;
579 }
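
/*
 * Worked example of the ordering above (numbers are illustrative): an RT
 * thread with rt_priority 10 has normal_prio 89, while a SCHED_OTHER thread
 * has normal_prio of at least 120, so with MAX_RT_PRIO == 100:
 *
 *	min( 89, MAX_RT_PRIO) ->  89	sorted ahead in the plist
 *	min(120, MAX_RT_PRIO) -> 100	all non-RT waiters collapse to 100
 *
 * plist keeps equal-priority nodes in insertion order, which is what gives
 * the non-RT waiters their FIFO wakeup behaviour.
 */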
580
581 /**
582 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
583 * @q: The futex_q to unqueue
584 *
585 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
586 * be paired with exactly one earlier call to futex_queue().
587 *
588 * Return:
589 * - 1 - if the futex_q was still queued (and we unqueued it);
590 * - 0 - if the futex_q was already removed by the waking thread
591 */
592 int futex_unqueue(struct futex_q *q)
593 {
594 spinlock_t *lock_ptr;
595 int ret = 0;
596
597 /* In the common case we don't take the spinlock, which is nice. */
598 retry:
599 /*
600 * q->lock_ptr can change between this read and the following spin_lock.
601 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
602 * optimizing lock_ptr out of the logic below.
603 */
604 lock_ptr = READ_ONCE(q->lock_ptr);
605 if (lock_ptr != NULL) {
606 spin_lock(lock_ptr);
607 /*
608 * q->lock_ptr can change between reading it and
609 * spin_lock(), causing us to take the wrong lock. This
610 * corrects the race condition.
611 *
612 * Reasoning goes like this: if we have the wrong lock,
613 * q->lock_ptr must have changed (maybe several times)
614 * between reading it and the spin_lock(). It can
615 * change again after the spin_lock() but only if it was
616 * already changed before the spin_lock(). It cannot,
617 * however, change back to the original value. Therefore
618 * we can detect whether we acquired the correct lock.
619 */
620 if (unlikely(lock_ptr != q->lock_ptr)) {
621 spin_unlock(lock_ptr);
622 goto retry;
623 }
624 __futex_unqueue(q);
625
626 BUG_ON(q->pi_state);
627
628 spin_unlock(lock_ptr);
629 ret = 1;
630 }
631
632 return ret;
633 }
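
/*
 * The retry dance above is a general idiom for objects whose lock can be
 * switched underneath us (requeueing moves q->lock_ptr to another bucket's
 * lock). Stripped to its bones (hypothetical names):
 *
 *	for (;;) {
 *		lock = READ_ONCE(obj->lock_ptr);
 *		if (!lock)
 *			break;			// already unlinked by a waker
 *		spin_lock(lock);
 *		if (lock == obj->lock_ptr)
 *			break;			// we hold the lock that protects obj
 *		spin_unlock(lock);		// raced with a requeue, try again
 *	}
 */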
634
635 /*
636 * PI futexes can not be requeued and must remove themselves from the hash
637 * bucket. The hash bucket lock (i.e. lock_ptr) is held.
638 */
639 void futex_unqueue_pi(struct futex_q *q)
640 {
641 /*
642 * If the lock was not acquired (due to timeout or signal) then the
643 * rt_waiter is removed before futex_q is. If this is observed by
644 * an unlocker after dropping the rtmutex wait lock and before
645 * acquiring the hash bucket lock, then the unlocker dequeues the
646 * futex_q from the hash bucket list to guarantee consistent state
647 * vs. userspace. Therefore the dequeue here must be conditional.
648 */
649 if (!plist_node_empty(&q->list))
650 __futex_unqueue(q);
651
652 BUG_ON(!q->pi_state);
653 put_pi_state(q->pi_state);
654 q->pi_state = NULL;
655 }
656
657 /* Constants for the pending_op argument of handle_futex_death */
658 #define HANDLE_DEATH_PENDING true
659 #define HANDLE_DEATH_LIST false
660
661 /*
662 * Process a futex-list entry, check whether it's owned by the
663 * dying task, and do notification if so:
664 */
665 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
666 bool pi, bool pending_op)
667 {
668 u32 uval, nval, mval;
669 pid_t owner;
670 int err;
671
672 /* Futex address must be 32bit aligned */
673 if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
674 return -1;
675
676 retry:
677 if (get_user(uval, uaddr))
678 return -1;
679
680 /*
681 * Special case for regular (non PI) futexes. The unlock path in
682 * user space has two race scenarios:
683 *
684 * 1. The unlock path releases the user space futex value and
685 * before it can execute the futex() syscall to wake up
686 * waiters it is killed.
687 *
688 * 2. A woken up waiter is killed before it can acquire the
689 * futex in user space.
690 *
691 * In the second case, the wake up notification could be generated
692 * by the unlock path in user space after setting the futex value
693 * to zero or by the kernel after setting the OWNER_DIED bit below.
694 *
695 * In both cases the TID validation below prevents a wakeup of
696 * potential waiters which can cause these waiters to block
697 * forever.
698 *
699 * In both cases the following conditions are met:
700 *
701 * 1) task->robust_list->list_op_pending != NULL
702 * @pending_op == true
703 * 2) The owner part of user space futex value == 0
704 * 3) Regular futex: @pi == false
705 *
706 * If these conditions are met, it is safe to attempt waking up a
707 * potential waiter without touching the user space futex value and
708 * trying to set the OWNER_DIED bit. If the futex value is zero,
709 * the rest of the user space mutex state is consistent, so a woken
710 * waiter will just take over the uncontended futex. Setting the
711 * OWNER_DIED bit would create inconsistent state and malfunction
712 * of the user space owner died handling. Otherwise, the OWNER_DIED
713 * bit is already set, and the woken waiter is expected to deal with
714 * this.
715 */
716 owner = uval & FUTEX_TID_MASK;
717
718 if (pending_op && !pi && !owner) {
719 futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
720 FUTEX_BITSET_MATCH_ANY);
721 return 0;
722 }
723
724 if (owner != task_pid_vnr(curr))
725 return 0;
726
727 /*
728 * Ok, this dying thread is truly holding a futex
729 * of interest. Set the OWNER_DIED bit atomically
730 * via cmpxchg, and if the value had FUTEX_WAITERS
731 * set, wake up a waiter (if any). (We have to do a
732 * futex_wake() even if OWNER_DIED is already set -
733 * to handle the rare but possible case of recursive
734 * thread-death.) The rest of the cleanup is done in
735 * userspace.
736 */
737 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
738
739 /*
740 * We are not holding a lock here, but we want to have
741 * the pagefault_disable/enable() protection because
742 * we want to handle the fault gracefully. If the
743 * access fails we try to fault in the futex with R/W
744 * verification via get_user_pages. get_user() above
745 * does not guarantee R/W access. If that fails we
746 * give up and leave the futex locked.
747 */
748 if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
749 switch (err) {
750 case -EFAULT:
751 if (fault_in_user_writeable(uaddr))
752 return -1;
753 goto retry;
754
755 case -EAGAIN:
756 cond_resched();
757 goto retry;
758
759 default:
760 WARN_ON_ONCE(1);
761 return err;
762 }
763 }
764
765 if (nval != uval)
766 goto retry;
767
768 /*
769 * Wake robust non-PI futexes here. The wakeup of
770 * PI futexes happens in exit_pi_state():
771 */
772 if (!pi && (uval & FUTEX_WAITERS)) {
773 futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
774 FUTEX_BITSET_MATCH_ANY);
775 }
776
777 return 0;
778 }
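
/*
 * For reference, the 32bit futex word handled above is laid out by userspace
 * (glibc-style robust mutexes) as follows; this function only ever ORs in
 * the top two bits:
 *
 *	bits 0-29  FUTEX_TID_MASK	TID of the owner (0 == unlocked)
 *	bit  30    FUTEX_OWNER_DIED	set here when the owner dies
 *	bit  31    FUTEX_WAITERS	somebody is blocked in the kernel
 *
 * A woken waiter is then expected to take over the lock, roughly (userspace
 * sketch, illustrative only; load() and cmpxchg() stand in for the
 * platform's atomics):
 *
 *	val = load(&mutex->futex_word);
 *	if (val & FUTEX_OWNER_DIED &&
 *	    cmpxchg(&mutex->futex_word, val,
 *		    (val & FUTEX_WAITERS) | my_tid) == val) {
 *		// lock acquired; report EOWNERDEAD so the caller can
 *		// repair the data protected by the mutex
 *	}
 */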
779
780 /*
781 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
782 */
783 static inline int fetch_robust_entry(struct robust_list __user **entry,
784 struct robust_list __user * __user *head,
785 unsigned int *pi)
786 {
787 unsigned long uentry;
788
789 if (get_user(uentry, (unsigned long __user *)head))
790 return -EFAULT;
791
792 *entry = (void __user *)(uentry & ~1UL);
793 *pi = uentry & 1;
794
795 return 0;
796 }
797
798 /*
799 * Walk curr->robust_list (very carefully, it's a userspace list!)
800 * and mark any locks found there dead, and notify any waiters.
801 *
802 * We silently return on any sign of list-walking problem.
803 */
804 static void exit_robust_list(struct task_struct *curr)
805 {
806 struct robust_list_head __user *head = curr->robust_list;
807 struct robust_list __user *entry, *next_entry, *pending;
808 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
809 unsigned int next_pi;
810 unsigned long futex_offset;
811 int rc;
812
813 /*
814 * Fetch the list head (which was registered earlier, via
815 * sys_set_robust_list()):
816 */
817 if (fetch_robust_entry(&entry, &head->list.next, &pi))
818 return;
819 /*
820 * Fetch the relative futex offset:
821 */
822 if (get_user(futex_offset, &head->futex_offset))
823 return;
824 /*
825 * Fetch any possibly pending lock-add first, and handle it
826 * if it exists:
827 */
828 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
829 return;
830
831 next_entry = NULL; /* avoid warning with gcc */
832 while (entry != &head->list) {
833 /*
834 * Fetch the next entry in the list before calling
835 * handle_futex_death:
836 */
837 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
838 /*
839 * A pending lock might already be on the list, so
840 * don't process it twice:
841 */
842 if (entry != pending) {
843 if (handle_futex_death((void __user *)entry + futex_offset,
844 curr, pi, HANDLE_DEATH_LIST))
845 return;
846 }
847 if (rc)
848 return;
849 entry = next_entry;
850 pi = next_pi;
851 /*
852 * Avoid excessively long or circular lists:
853 */
854 if (!--limit)
855 break;
856
857 cond_resched();
858 }
859
860 if (pending) {
861 handle_futex_death((void __user *)pending + futex_offset,
862 curr, pip, HANDLE_DEATH_PENDING);
863 }
864 }
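
/*
 * The list walked above lives entirely in userspace. A minimal sketch of the
 * registration side (roughly what glibc does; simplified, error handling
 * omitted, and "struct demo_mutex" is hypothetical):
 *
 *	#include <linux/futex.h>	// struct robust_list_head
 *	#include <stddef.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },	// empty circular list
 *		.futex_offset	 = offsetof(struct demo_mutex, futex_word),
 *		.list_op_pending = NULL,
 *	};
 *
 *	// Once per thread; exit_robust_list() walks exactly this structure.
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * Each mutex the thread holds is then linked into head.list, and
 * list_op_pending is set around the lock/unlock fast paths so that a death
 * in the middle of either is still handled (HANDLE_DEATH_PENDING above).
 */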
865
866 #ifdef CONFIG_COMPAT
867 static void __user *futex_uaddr(struct robust_list __user *entry,
868 compat_long_t futex_offset)
869 {
870 compat_uptr_t base = ptr_to_compat(entry);
871 void __user *uaddr = compat_ptr(base + futex_offset);
872
873 return uaddr;
874 }
875
876 /*
877 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
878 */
879 static inline int
880 compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
881 compat_uptr_t __user *head, unsigned int *pi)
882 {
883 if (get_user(*uentry, head))
884 return -EFAULT;
885
886 *entry = compat_ptr((*uentry) & ~1);
887 *pi = (unsigned int)(*uentry) & 1;
888
889 return 0;
890 }
891
892 /*
893 * Walk curr->robust_list (very carefully, it's a userspace list!)
894 * and mark any locks found there dead, and notify any waiters.
895 *
896 * We silently return on any sign of list-walking problem.
897 */
898 static void compat_exit_robust_list(struct task_struct *curr)
899 {
900 struct compat_robust_list_head __user *head = curr->compat_robust_list;
901 struct robust_list __user *entry, *next_entry, *pending;
902 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
903 unsigned int next_pi;
904 compat_uptr_t uentry, next_uentry, upending;
905 compat_long_t futex_offset;
906 int rc;
907
908 /*
909 * Fetch the list head (which was registered earlier, via
910 * sys_set_robust_list()):
911 */
912 if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
913 return;
914 /*
915 * Fetch the relative futex offset:
916 */
917 if (get_user(futex_offset, &head->futex_offset))
918 return;
919 /*
920 * Fetch any possibly pending lock-add first, and handle it
921 * if it exists:
922 */
923 if (compat_fetch_robust_entry(&upending, &pending,
924 &head->list_op_pending, &pip))
925 return;
926
927 next_entry = NULL; /* avoid warning with gcc */
928 while (entry != (struct robust_list __user *) &head->list) {
929 /*
930 * Fetch the next entry in the list before calling
931 * handle_futex_death:
932 */
933 rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
934 (compat_uptr_t __user *)&entry->next, &next_pi);
935 /*
936 * A pending lock might already be on the list, so
937 * don't process it twice:
938 */
939 if (entry != pending) {
940 void __user *uaddr = futex_uaddr(entry, futex_offset);
941
942 if (handle_futex_death(uaddr, curr, pi,
943 HANDLE_DEATH_LIST))
944 return;
945 }
946 if (rc)
947 return;
948 uentry = next_uentry;
949 entry = next_entry;
950 pi = next_pi;
951 /*
952 * Avoid excessively long or circular lists:
953 */
954 if (!--limit)
955 break;
956
957 cond_resched();
958 }
959 if (pending) {
960 void __user *uaddr = futex_uaddr(pending, futex_offset);
961
962 handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
963 }
964 }
965 #endif
966
967 #ifdef CONFIG_FUTEX_PI
968
969 /*
970 * This task is holding PI mutexes at exit time => bad.
971 * Kernel cleans up PI-state, but userspace is likely hosed.
972 * (Robust-futex cleanup is separate and might save the day for userspace.)
973 */
974 static void exit_pi_state_list(struct task_struct *curr)
975 {
976 struct list_head *next, *head = &curr->pi_state_list;
977 struct futex_pi_state *pi_state;
978 struct futex_hash_bucket *hb;
979 union futex_key key = FUTEX_KEY_INIT;
980
981 /*
982 * We are a ZOMBIE and nobody can enqueue itself on
983 * pi_state_list anymore, but we have to be careful
984 * versus waiters unqueueing themselves:
985 */
986 raw_spin_lock_irq(&curr->pi_lock);
987 while (!list_empty(head)) {
988 next = head->next;
989 pi_state = list_entry(next, struct futex_pi_state, list);
990 key = pi_state->key;
991 hb = futex_hash(&key);
992
993 /*
994 * We can race against put_pi_state() removing itself from the
995 * list (a waiter going away). put_pi_state() will first
996 * decrement the reference count and then modify the list, so
997 * it's possible to see the list entry but fail this reference
998 * acquire.
999 *
1000 * In that case; drop the locks to let put_pi_state() make
1001 * progress and retry the loop.
1002 */
1003 if (!refcount_inc_not_zero(&pi_state->refcount)) {
1004 raw_spin_unlock_irq(&curr->pi_lock);
1005 cpu_relax();
1006 raw_spin_lock_irq(&curr->pi_lock);
1007 continue;
1008 }
1009 raw_spin_unlock_irq(&curr->pi_lock);
1010
1011 spin_lock(&hb->lock);
1012 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1013 raw_spin_lock(&curr->pi_lock);
1014 /*
1015 * We dropped the pi-lock, so re-check whether this
1016 * task still owns the PI-state:
1017 */
1018 if (head->next != next) {
1019 /* retain curr->pi_lock for the loop invariant */
1020 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
1021 spin_unlock(&hb->lock);
1022 put_pi_state(pi_state);
1023 continue;
1024 }
1025
1026 WARN_ON(pi_state->owner != curr);
1027 WARN_ON(list_empty(&pi_state->list));
1028 list_del_init(&pi_state->list);
1029 pi_state->owner = NULL;
1030
1031 raw_spin_unlock(&curr->pi_lock);
1032 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1033 spin_unlock(&hb->lock);
1034
1035 rt_mutex_futex_unlock(&pi_state->pi_mutex);
1036 put_pi_state(pi_state);
1037
1038 raw_spin_lock_irq(&curr->pi_lock);
1039 }
1040 raw_spin_unlock_irq(&curr->pi_lock);
1041 }
1042 #else
1043 static inline void exit_pi_state_list(struct task_struct *curr) { }
1044 #endif
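
/*
 * The refcount_inc_not_zero() loop in exit_pi_state_list() is the usual
 * "the object may be concurrently released" idiom. Reduced to its essentials
 * (hypothetical names, locking details elided):
 *
 *	while ((obj = first_entry_or_null(head))) {
 *		if (!refcount_inc_not_zero(&obj->refcount)) {
 *			// The last reference was just dropped but the entry is
 *			// not unlinked yet: back off so the releaser can make
 *			// progress, then look at the list head again.
 *			unlock(); cpu_relax(); lock();
 *			continue;
 *		}
 *		// We own a reference now and may safely drop the list lock.
 *		...
 *	}
 */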
1045
1046 static void futex_cleanup(struct task_struct *tsk)
1047 {
1048 if (unlikely(tsk->robust_list)) {
1049 exit_robust_list(tsk);
1050 tsk->robust_list = NULL;
1051 }
1052
1053 #ifdef CONFIG_COMPAT
1054 if (unlikely(tsk->compat_robust_list)) {
1055 compat_exit_robust_list(tsk);
1056 tsk->compat_robust_list = NULL;
1057 }
1058 #endif
1059
1060 if (unlikely(!list_empty(&tsk->pi_state_list)))
1061 exit_pi_state_list(tsk);
1062 }
1063
1064 /**
1065 * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD
1066 * @tsk: task to set the state on
1067 *
1068 * Set the futex exit state of the task lockless. The futex waiter code
1069 * observes that state when a task is exiting and loops until the task has
1070 * actually finished the futex cleanup. The worst case for this is that the
1071 * waiter runs through the wait loop until the state becomes visible.
1072 *
1073 * This is called from the recursive fault handling path in make_task_dead().
1074 *
1075 * This is best effort. Either the futex exit code has run already or
1076 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
1077 * take it over. If not, the problem is pushed back to user space. If the
1078 * futex exit code did not run yet, then an already queued waiter might
1079 * block forever, but there is nothing which can be done about that.
1080 */
1081 void futex_exit_recursive(struct task_struct *tsk)
1082 {
1083 /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
1084 if (tsk->futex_state == FUTEX_STATE_EXITING)
1085 mutex_unlock(&tsk->futex_exit_mutex);
1086 tsk->futex_state = FUTEX_STATE_DEAD;
1087 }
1088
1089 static void futex_cleanup_begin(struct task_struct *tsk)
1090 {
1091 /*
1092 * Prevent various race issues against a concurrent incoming waiter
1093 * including live locks by forcing the waiter to block on
1094 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
1095 * attach_to_pi_owner().
1096 */
1097 mutex_lock(&tsk->futex_exit_mutex);
1098
1099 /*
1100 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
1101 *
1102 * This ensures that all subsequent checks of tsk->futex_state in
1103 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
1104 * tsk->pi_lock held.
1105 *
1106 * It guarantees also that a pi_state which was queued right before
1107 * the state change under tsk->pi_lock by a concurrent waiter must
1108 * be observed in exit_pi_state_list().
1109 */
1110 raw_spin_lock_irq(&tsk->pi_lock);
1111 tsk->futex_state = FUTEX_STATE_EXITING;
1112 raw_spin_unlock_irq(&tsk->pi_lock);
1113 }
1114
1115 static void futex_cleanup_end(struct task_struct *tsk, int state)
1116 {
1117 /*
1118 * Lockless store. The only side effect is that an observer might
1119 * take another loop until it becomes visible.
1120 */
1121 tsk->futex_state = state;
1122 /*
1123 * Drop the exit protection. This unblocks waiters which observed
1124 * FUTEX_STATE_EXITING to reevaluate the state.
1125 */
1126 mutex_unlock(&tsk->futex_exit_mutex);
1127 }
1128
1129 void futex_exec_release(struct task_struct *tsk)
1130 {
1131 /*
1132 * The state handling is done for consistency, but in the case of
1133 * exec() there is no way to prevent further damage as the PID stays
1134 * the same. But for the unlikely and arguably buggy case that a
1135 * futex is held on exec(), this provides at least as much state
1136 * consistency protection as is possible.
1137 */
1138 futex_cleanup_begin(tsk);
1139 futex_cleanup(tsk);
1140 /*
1141 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
1142 * exec a new binary.
1143 */
1144 futex_cleanup_end(tsk, FUTEX_STATE_OK);
1145 }
1146
1147 void futex_exit_release(struct task_struct *tsk)
1148 {
1149 futex_cleanup_begin(tsk);
1150 futex_cleanup(tsk);
1151 futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
1152 }
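
/*
 * Putting the exit machinery together, a condensed sketch of the waiter side
 * (not a verbatim copy of attach_to_pi_owner()): a waiter that observes
 * FUTEX_STATE_EXITING reports -EBUSY and hands the task to its caller, which
 * then blocks in wait_for_owner_exiting() until futex_cleanup_end() releases
 * futex_exit_mutex.
 *
 *	raw_spin_lock_irq(&p->pi_lock);
 *	if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
 *		int ret = (p->futex_state == FUTEX_STATE_DEAD) ? -ESRCH : -EBUSY;
 *
 *		raw_spin_unlock_irq(&p->pi_lock);
 *		if (ret == -EBUSY)
 *			*exiting = p;	// caller calls wait_for_owner_exiting()
 *		else
 *			put_task_struct(p);
 *		return ret;
 *	}
 */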
1153
1154 static int __init futex_init(void)
1155 {
1156 unsigned int futex_shift;
1157 unsigned long i;
1158
1159 #ifdef CONFIG_BASE_SMALL
1160 futex_hashsize = 16;
1161 #else
1162 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
1163 #endif
1164
1165 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
1166 futex_hashsize, 0, 0,
1167 &futex_shift, NULL,
1168 futex_hashsize, futex_hashsize);
1169 futex_hashsize = 1UL << futex_shift;
1170
1171 for (i = 0; i < futex_hashsize; i++) {
1172 atomic_set(&futex_queues[i].waiters, 0);
1173 plist_head_init(&futex_queues[i].chain);
1174 spin_lock_init(&futex_queues[i].lock);
1175 }
1176
1177 return 0;
1178 }
1179 core_initcall(futex_init);
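
/*
 * Sizing example (illustrative): with CONFIG_BASE_SMALL unset and 8 possible
 * CPUs the request is roundup_pow_of_two(256 * 8) = 2048 buckets.
 * alloc_large_system_hash() reports the shift it actually used, and
 * futex_hashsize is rewritten as 1UL << futex_shift, so the
 * "hash & (futex_hashsize - 1)" masking in futex_hash() stays correct even
 * if the allocator rounded the size.
 */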
1180