1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 #include <linux/compat.h>
4 #include <linux/syscalls.h>
5 #include <linux/time_namespace.h>
6
7 #include "futex.h"
8 #include <trace/hooks/futex.h>
9
10 /*
11 * Support for robust futexes: the kernel cleans up held futexes at
12 * thread exit time.
13 *
14 * Implementation: user-space maintains a per-thread list of locks it
15 * is holding. Upon do_exit(), the kernel carefully walks this list,
16 * and marks all locks that are owned by this thread with the
17 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
18 * always manipulated with the lock held, so the list is private and
19 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
20 * field, to allow the kernel to clean up if the thread dies after
21 * acquiring the lock, but just before it could have added itself to
22 * the list. There can only be one such pending lock.
23 */
24
/**
 * sys_set_robust_list() - Set the robust-futex list head of a task
 * @head: pointer to the list-head
 * @len: length of the list-head, as userspace expects
 *
 * Return: 0 on success, -EINVAL if @len does not match the single
 * robust_list_head layout the kernel currently supports.
 */
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
		size_t, len)
{
	/*
	 * The kernel knows only one size for now:
	 */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	/* The list itself lives in userspace; only its head is recorded here */
	current->robust_list = head;

	return 0;
}
43
/**
 * sys_get_robust_list() - Get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 *
 * Return: 0 on success, -ESRCH if @pid does not exist, -EPERM if the
 * caller may not inspect the target task, -EFAULT on a faulting user
 * pointer.
 */
SYSCALL_DEFINE3(get_robust_list, int, pid,
		struct robust_list_head __user * __user *, head_ptr,
		size_t __user *, len_ptr)
{
	struct robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	/* RCU protects both the pid lookup and the task_struct access below */
	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	/* Reading another task's list head requires ptrace-read credentials */
	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->robust_list;
	rcu_read_unlock();

	/* Copy to userspace only after dropping the RCU read lock */
	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(head, head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
85
/**
 * do_futex() - Dispatch a futex operation to the futex core
 * @uaddr:   userspace address of the futex word
 * @op:      command (FUTEX_CMD_MASK bits) plus FUTEX_PRIVATE_FLAG /
 *           FUTEX_CLOCK_REALTIME modifier bits
 * @val:     command-dependent value
 * @timeout: optional timeout, already converted by the caller
 * @uaddr2:  second futex address for requeue / wake-op commands
 * @val2:    command-dependent second value
 * @val3:    command-dependent third value / wakeup bitset
 *
 * Return: command-specific result, or -ENOSYS for an unknown command or
 * an invalid FUTEX_CLOCK_REALTIME combination.
 */
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	/* FUTEX_CLOCK_REALTIME is only valid for the three commands below */
	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI &&
		    cmd != FUTEX_LOCK_PI2)
			return -ENOSYS;
	}

	/* Android vendor hook: may adjust @flags before dispatch */
	trace_android_vh_do_futex(cmd, &flags, uaddr2);
	switch (cmd) {
	case FUTEX_WAIT:
		/* Plain WAIT is WAIT_BITSET with an all-ones bitset */
		val3 = FUTEX_BITSET_MATCH_ANY;
		fallthrough;
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		fallthrough;
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		/* FUTEX_LOCK_PI is implicitly CLOCK_REALTIME based */
		flags |= FLAGS_CLOCKRT;
		fallthrough;
	case FUTEX_LOCK_PI2:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}
138
futex_cmd_has_timeout(u32 cmd)139 static __always_inline bool futex_cmd_has_timeout(u32 cmd)
140 {
141 switch (cmd) {
142 case FUTEX_WAIT:
143 case FUTEX_LOCK_PI:
144 case FUTEX_LOCK_PI2:
145 case FUTEX_WAIT_BITSET:
146 case FUTEX_WAIT_REQUEUE_PI:
147 return true;
148 }
149 return false;
150 }
151
/*
 * Convert a validated userspace timespec into the ktime_t the futex core
 * expects.
 *
 * FUTEX_WAIT takes a *relative* timeout, so it is turned into an absolute
 * expiry against the current monotonic clock here. The other timed
 * commands already pass absolute times; monotonic ones are translated
 * from the caller's time namespace to the host clock (CLOCK_REALTIME
 * needs no translation, and FUTEX_LOCK_PI is implicitly CLOCK_REALTIME -
 * see do_futex()).
 *
 * Return: 0 on success, -EINVAL if @ts is not a valid timespec.
 */
static __always_inline int
futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t)
{
	if (!timespec64_valid(ts))
		return -EINVAL;

	*t = timespec64_to_ktime(*ts);
	if (cmd == FUTEX_WAIT)
		*t = ktime_add_safe(ktime_get(), *t);
	else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
		*t = timens_ktime_to_host(CLOCK_MONOTONIC, *t);
	return 0;
}
165
/**
 * sys_futex - Fast userspace locking system call entry point
 * @uaddr:  userspace address of the futex word
 * @op:     futex command plus modifier flags
 * @val:    command-dependent value
 * @utime:  optional timeout pointer; commands without a timeout reuse
 *          this argument as a plain integer (val2)
 * @uaddr2: second futex address for requeue / wake-op commands
 * @val3:   command-dependent value / bitset
 *
 * Return: result of do_futex(), or -EFAULT / -EINVAL for a bad timeout.
 */
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		const struct __kernel_timespec __user *, utime,
		u32 __user *, uaddr2, u32, val3)
{
	int ret, cmd = op & FUTEX_CMD_MASK;
	ktime_t t, *tp = NULL;
	struct timespec64 ts;

	if (utime && futex_cmd_has_timeout(cmd)) {
		/* Fault-injection hook for exercising error paths */
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (get_timespec64(&ts, utime))
			return -EFAULT;
		ret = futex_init_timeout(cmd, op, &ts, &t);
		if (ret)
			return ret;
		tp = &t;
	}

	/* The cast forwards utime as val2 for the non-timed commands */
	return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
}
187
188 /* Mask of available flags for each futex in futex_waitv list */
189 #define FUTEXV_WAITER_MASK (FUTEX_32 | FUTEX_PRIVATE_FLAG)
190
191 /**
192 * futex_parse_waitv - Parse a waitv array from userspace
193 * @futexv: Kernel side list of waiters to be filled
194 * @uwaitv: Userspace list to be parsed
195 * @nr_futexes: Length of futexv
196 *
197 * Return: Error code on failure, 0 on success
198 */
futex_parse_waitv(struct futex_vector * futexv,struct futex_waitv __user * uwaitv,unsigned int nr_futexes)199 static int futex_parse_waitv(struct futex_vector *futexv,
200 struct futex_waitv __user *uwaitv,
201 unsigned int nr_futexes)
202 {
203 struct futex_waitv aux;
204 unsigned int i;
205
206 for (i = 0; i < nr_futexes; i++) {
207 if (copy_from_user(&aux, &uwaitv[i], sizeof(aux)))
208 return -EFAULT;
209
210 if ((aux.flags & ~FUTEXV_WAITER_MASK) || aux.__reserved)
211 return -EINVAL;
212
213 if (!(aux.flags & FUTEX_32))
214 return -EINVAL;
215
216 futexv[i].w.flags = aux.flags;
217 futexv[i].w.val = aux.val;
218 futexv[i].w.uaddr = aux.uaddr;
219 futexv[i].q = futex_q_init;
220 }
221
222 return 0;
223 }
224
/**
 * sys_futex_waitv - Wait on a list of futexes
 * @waiters:    List of futexes to wait on
 * @nr_futexes: Length of futexv
 * @flags:      Flag for timeout (monotonic/realtime)
 * @timeout:    Optional absolute timeout.
 * @clockid:    Clock to be used for the timeout, realtime or monotonic.
 *
 * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes
 * if a futex_wake() is performed at any uaddr. The syscall returns immediately
 * if any waiter has *uaddr != val. *timeout is an optional timeout value for
 * the operation. Each waiter has individual flags. The `flags` argument for
 * the syscall should be used solely for specifying the timeout as realtime, if
 * needed. Flags for private futexes, sizes, etc. should be used on the
 * individual flags of each waiter.
 *
 * Returns the array index of one of the woken futexes. No further information
 * is provided: any number of other futexes may also have been woken by the
 * same event, and if more than one futex was woken, the returned index may
 * refer to any one of them. (It is not necessarily the futex with the
 * smallest index, nor the one most recently woken, nor...)
 */

SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
		unsigned int, nr_futexes, unsigned int, flags,
		struct __kernel_timespec __user *, timeout, clockid_t, clockid)
{
	struct hrtimer_sleeper to;	/* only initialized when timeout != NULL */
	struct futex_vector *futexv;
	struct timespec64 ts;
	ktime_t time;
	int ret;

	/* This syscall supports no flags for now */
	if (flags)
		return -EINVAL;

	if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
		return -EINVAL;

	if (timeout) {
		int flag_clkid = 0, flag_init = 0;

		if (clockid == CLOCK_REALTIME) {
			flag_clkid = FLAGS_CLOCKRT;
			flag_init = FUTEX_CLOCK_REALTIME;
		}

		if (clockid != CLOCK_REALTIME && clockid != CLOCK_MONOTONIC)
			return -EINVAL;

		if (get_timespec64(&ts, timeout))
			return -EFAULT;

		/*
		 * Since there's no opcode for futex_waitv, use
		 * FUTEX_WAIT_BITSET that uses absolute timeout as well
		 */
		ret = futex_init_timeout(FUTEX_WAIT_BITSET, flag_init, &ts, &time);
		if (ret)
			return ret;

		/* Arms an on-stack hrtimer; must be torn down on every exit path */
		futex_setup_timer(&time, &to, flag_clkid, 0);
	}

	futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
	if (!futexv) {
		ret = -ENOMEM;
		goto destroy_timer;
	}

	ret = futex_parse_waitv(futexv, waiters, nr_futexes);
	if (!ret)
		ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);

	kfree(futexv);

destroy_timer:
	/* Tear down the timer only if one was actually armed above */
	if (timeout) {
		hrtimer_cancel(&to.timer);
		destroy_hrtimer_on_stack(&to.timer);
	}
	return ret;
}
309
310 #ifdef CONFIG_COMPAT
/*
 * Compat variant of sys_set_robust_list(): 32-bit tasks register a
 * compat_robust_list_head, kept in a separate task_struct field.
 */
COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	/* Only one list-head layout is known, as in the native syscall */
	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}
322
/*
 * Compat variant of sys_get_robust_list(): returns the 32-bit task's
 * compat_robust_list head as a compat_uptr_t.
 */
COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	/* RCU protects both the pid lookup and the task_struct access below */
	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	/* Reading another task's list head requires ptrace-read credentials */
	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	/* Copy to userspace only after dropping the RCU read lock */
	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
358 #endif /* CONFIG_COMPAT */
359
360 #ifdef CONFIG_COMPAT_32BIT_TIME
/*
 * 32-bit-time variant of sys_futex(): identical logic, but the timeout
 * arrives as an old_timespec32.
 *
 * NOTE(review): unlike sys_futex(), this path has no should_fail_futex()
 * fault-injection hook - confirm whether that asymmetry is intended.
 */
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
		const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	int ret, cmd = op & FUTEX_CMD_MASK;
	ktime_t t, *tp = NULL;
	struct timespec64 ts;

	if (utime && futex_cmd_has_timeout(cmd)) {
		if (get_old_timespec32(&ts, utime))
			return -EFAULT;
		ret = futex_init_timeout(cmd, op, &ts, &t);
		if (ret)
			return ret;
		tp = &t;
	}

	/* The cast forwards utime as val2 for the non-timed commands */
	return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3);
}
380 #endif /* CONFIG_COMPAT_32BIT_TIME */
381
382