/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>


/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting. For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop. If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task. However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);

	/*
	 * Clear all pending traps and TRAPPING. TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group; set the signal
		 * for future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt. Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations. If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing. If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	int dumpable = 0;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		// Do not print the warning here as it will spam when doing
		// "adb bugreport" with a userdebug Android system image.
		// BUG: 69808686
		//WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid) &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	rcu_read_lock();
	if (dumpable != SUID_DUMP_USER &&
	    !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	rcu_read_lock();
	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	rcu_read_unlock();
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED. TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens. We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state. IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
			    TASK_UNINTERRUPTIBLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
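
/*
 * Illustrative userland sketch (not part of the kernel): how the two
 * attach flavors above are typically driven from a debugger. "pid" is
 * assumed to be a process the caller is permitted to trace; error
 * handling is elided. PTRACE_ATTACH forces a SIGSTOP stop, while
 * PTRACE_SEIZE leaves the tracee running and takes PTRACE_O_* flags in
 * the data argument (addr must be 0, matching the check above).
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);		wait for the SIGSTOP trap
 *	ptrace(PTRACE_DETACH, pid, NULL, NULL);
 *
 *	ptrace(PTRACE_SEIZE, pid, NULL, (void *)PTRACE_O_TRACEEXIT);
 */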

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
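
/*
 * Illustrative userland sketch (not part of the kernel): PTRACE_TRACEME
 * is issued by the future tracee itself, typically between fork() and
 * exec(), so the real parent becomes the tracer and the child stops
 * with SIGTRAP when the exec completes. Error handling is elided.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execlp("true", "true", (char *)NULL);
 *		_exit(127);
 *	}
 *	int status;
 *	waitpid(pid, &status, 0);	child is stopped at the exec trap
 */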

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping. Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);
	if (unlikely(dead))
		release_task(child);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing, and returns with it held too. But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	write_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	write_lock_irq(&tasklist_lock);
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
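
/*
 * Illustrative userland sketch (not part of the kernel): setting trace
 * options on a stopped tracee. The option bits land in child->ptrace
 * shifted by PT_OPT_FLAG_SHIFT, as done above. "pid" is an assumed,
 * already-attached tracee stopped in a trap.
 *
 *	#include <sys/ptrace.h>
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXIT));
 */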

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
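
/*
 * Illustrative userland sketch (not part of the kernel): inspecting and
 * rewriting the siginfo of the current stop before resuming. Both calls
 * operate on child->last_siginfo, so they fail with -EINVAL unless the
 * tracee is in a stop that has a siginfo, per the checks above. "pid"
 * is an assumed tracee in a signal-delivery stop.
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *
 *	siginfo_t si;
 *	ptrace(PTRACE_GETSIGINFO, pid, NULL, &si);
 *	si.si_code = SI_USER;
 *	ptrace(PTRACE_SETSIGINFO, pid, NULL, &si);
 */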

static int ptrace_peek_siginfo(struct task_struct *child,
			       unsigned long addr,
			       unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
			     sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(is_compat_task())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
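
/*
 * Illustrative userland sketch (not part of the kernel): dumping the
 * first few pending signals of a tracee without dequeueing them. The
 * args struct at addr mirrors the copy_from_user() above; the return
 * value is the number of siginfo records written to the buffer at
 * data. "pid" is an assumed stopped tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <signal.h>
 *
 *	struct ptrace_peeksiginfo_args arg = {
 *		.off   = 0,
 *		.flags = 0,		or PTRACE_PEEKSIGINFO_SHARED
 *		.nr    = 4,
 *	};
 *	siginfo_t info[4];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &arg, info);
 */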

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
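
/*
 * Illustrative userland sketch (not part of the kernel): the classic
 * syscall-tracing loop built on the resume path above. Each
 * PTRACE_SYSCALL resume stops the tracee once on syscall entry and once
 * on exit; a non-zero data argument injects that signal on resume.
 * "pid" is assumed to be an attached tracee whose current stop has not
 * yet been reaped by waitpid().
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	while (waitpid(pid, &status, 0) == pid && WIFSTOPPED(status))
 *		ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
 */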

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
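
/*
 * Illustrative userland sketch (not part of the kernel): reading the
 * general-purpose registers through the regset interface above. The
 * regset is selected by its core note type (passed in addr), here
 * NT_PRSTATUS, and the iovec length is clamped to the regset size on
 * return. "pid" is an assumed stopped tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *	#include <elf.h>
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 */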

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code. We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control. At least one trap is guaranteed to happen
		 * after this request. If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 * (See the usage sketch after this function.)
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception. If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events. Tracee must be in STOP. It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2). If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again. Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now. Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
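
/*
 * Illustrative userland sketch (not part of the kernel): the
 * INTERRUPT/LISTEN protocol handled above, which is only valid on a
 * tracee attached with PTRACE_SEIZE. INTERRUPT traps the tracee into a
 * PTRACE_EVENT_STOP stop; LISTEN then lets it sit in that stop while
 * still reporting async events. "pid" is an assumed SEIZEd tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	PTRACE_EVENT_STOP trap
 *	ptrace(PTRACE_LISTEN, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	blocks until the next event
 */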

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
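
/*
 * Illustrative userland sketch (not part of the kernel): reading and
 * writing one word of tracee memory via the generic helpers above. A
 * partial copy makes the syscall fail with -EIO, exactly as
 * generic_ptrace_peekdata() does; the libc wrapper returns the peeked
 * word directly, so errno must be checked to distinguish a stored -1
 * from an error. "pid" and "addr" are assumed to name a stopped tracee
 * and a mapped address in it.
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word != -1 || errno == 0)
 *		ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)word);
 */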

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */
