/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>


/*
 * Initialize a new task whose father had been ptraced.
 *
 * Called from copy_process().
 */
void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
{
	arch_ptrace_fork(child, clone_flags);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
static void ptrace_untrace(struct task_struct *child)
{
	spin_lock(&child->sighand->siglock);
	if (task_is_traced(child)) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			__set_task_state(child, TASK_STOPPED);
		} else {
			signal_wake_up(child, 1);
		}
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	arch_ptrace_untrace(child);
	if (task_is_traced(child))
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current) {
		ret = 0;
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		spin_lock_irq(&child->sighand->siglock);
		if (task_is_stopped(child))
			child->state = TASK_TRACED;
		else if (!task_is_traced(child) && !kill)
			ret = -ESRCH;
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill)
		ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;

	/* All systems go.. */
	return ret;
}
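
/*
 * A minimal tracer-side sketch of what the check above implies (illustrative
 * userspace, not part of this file; the helper name is hypothetical): most
 * requests only succeed while the tracee sits in TASK_TRACED, so after the
 * tracee has been resumed, a tracer commonly stops it again and waits for the
 * stop to be reported before issuing the next request.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	static int stop_tracee(pid_t pid)
 *	{
 *		int status;
 *
 *		if (kill(pid, SIGSTOP) < 0)
 *			return -1;
 *		if (waitpid(pid, &status, 0) < 0)
 *			return -1;
 *		return WIFSTOPPED(status) ? 0 : -1;	// further requests won't hit -ESRCH
 *	}
 */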

int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */
	int dumpable = 0;
	/* Don't let security modules deny introspection */
	if (task == current)
		return 0;
	rcu_read_lock();
	tcred = __task_cred(task);
	if ((cred->uid != tcred->euid ||
	     cred->uid != tcred->suid ||
	     cred->uid != tcred->uid  ||
	     cred->gid != tcred->egid ||
	     cred->gid != tcred->sgid ||
	     cred->gid != tcred->gid) &&
	    !capable(CAP_SYS_PTRACE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	if (!dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace_may_access(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return (!err ? true : false);
}

int ptrace_attach(struct task_struct *task)
{
	int retval;
	unsigned long flags;

	audit_ptrace(task);

	retval = -EPERM;
	if (same_thread_group(task, current))
		goto out;

	/* Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently under ptrace.
	 */
	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
	if (retval < 0)
		goto out;

	retval = -EPERM;
repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	if (!write_trylock_irqsave(&tasklist_lock, flags)) {
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	if (!task->mm)
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
	write_unlock_irqrestore(&tasklist_lock, flags);
	task_unlock(task);
	mutex_unlock(&task->cred_exec_mutex);
out:
	return retval;
}
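
/*
 * A minimal userspace sketch of the attach path (illustrative, not part of
 * this file; the helper name is hypothetical): PTRACE_ATTACH reparents the
 * task to the caller and forces a SIGSTOP, so the tracer waits for that stop
 * before touching the tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	static int attach_and_wait(pid_t pid)
 *	{
 *		int status;
 *
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0)
 *			return -1;		// e.g. EPERM from __ptrace_may_access()
 *		if (waitpid(pid, &status, 0) < 0)
 *			return -1;
 *		return WIFSTOPPED(status) ? 0 : -1;	// tracee is now stopped
 *	}
 */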

static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
{
	child->exit_code = data;
	/* .. re-parent .. */
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/* protect against de_thread()->release_task() */
	if (child->ptrace)
		__ptrace_detach(child, data);
	write_unlock_irq(&tasklist_lock);

	return 0;
}

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
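
/*
 * How a tracer typically ends up here (illustrative userspace sketch, not part
 * of this file; assumes pid is an already-stopped tracee): PTRACE_SETOPTIONS
 * carries the PTRACE_O_* bits in the data argument, and any bit outside
 * PTRACE_O_MASK makes the call fail with -EINVAL.
 *
 *	#include <sys/ptrace.h>
 *
 *	// report syscall stops as SIGTRAP|0x80 and keep tracing across fork()
 *	long err = ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *			  (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK));
 */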

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}


#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request, long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_process(child);

	return 0;
}
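
/*
 * Tracer-side resume loop matching the above (illustrative userspace sketch,
 * not part of this file; assumes pid is an attached, stopped tracee): the data
 * argument is the signal to deliver on resume (0 for none), which is what the
 * valid_signal() check and the child->exit_code assignment above are about.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	static void trace_syscalls(pid_t pid)
 *	{
 *		int status;
 *
 *		for (;;) {
 *			if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) < 0)
 *				break;			// resume until the next syscall stop
 *			if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
 *				break;
 *			// stopped at syscall entry/exit; inspect state here
 *		}
 *	}
 */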

int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;
	siginfo_t siginfo;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user((siginfo_t __user *) data,
						   &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, (siginfo_t __user *) data,
				   sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

	default:
		break;
	}

	return ret;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
	int ret = -EPERM;

	/*
	 * Are we already being traced?
	 */
repeat:
	task_lock(current);
	if (!(current->ptrace & PT_PTRACED)) {
		/*
		 * See ptrace_attach() comments about the locking here.
		 */
		unsigned long flags;
		if (!write_trylock_irqsave(&tasklist_lock, flags)) {
			task_unlock(current);
			do {
				cpu_relax();
			} while (!write_can_lock(&tasklist_lock));
			goto repeat;
		}

		ret = security_ptrace_traceme(current->parent);

		/*
		 * Set the ptrace bit in the process ptrace flags.
		 * Then link us on our parent's ptraced list.
		 */
		if (!ret) {
			current->ptrace |= PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}

		write_unlock_irqrestore(&tasklist_lock, flags);
	}
	task_unlock(current);
	return ret;
}
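
/*
 * The usual userspace pairing for PTRACE_TRACEME (illustrative sketch, not
 * part of this file; the helper name is hypothetical): the child asks to be
 * traced by its parent and then execs; the parent observes the post-exec
 * SIGTRAP stop with waitpid().
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	static pid_t spawn_traced(const char *path, char *const argv[])
 *	{
 *		int status;
 *		pid_t pid = fork();
 *
 *		if (pid == 0) {
 *			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *			execv(path, argv);
 *			_exit(127);			// only reached if exec fails
 *		}
 *		if (pid > 0)
 *			waitpid(pid, &status, 0);	// child stops after exec
 *		return pid;
 *	}
 */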

/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	read_lock(&tasklist_lock);
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);

	read_unlock(&tasklist_lock);
	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret < 0)
		goto out_put_task_struct;

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
	unsigned long tmp;
	int copied;

	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
	int copied;

	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
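
/*
 * Userspace view of the two helpers above (illustrative sketch, not part of
 * this file; the helper name is hypothetical): transfers happen one word at a
 * time.  For PEEK requests the glibc wrapper returns the word itself, so errno
 * has to be cleared and checked, since -1 is also a valid word value.
 *
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *
 *	static int peek_poke(pid_t pid, unsigned long addr, long new_word)
 *	{
 *		long old;
 *
 *		errno = 0;
 *		old = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *		if (old == -1 && errno)
 *			return -1;		// read failed (bad address, not stopped, ...)
 *		return ptrace(PTRACE_POKEDATA, pid, (void *)addr, (void *)new_word);
 *	}
 */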

#if defined CONFIG_COMPAT
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (!ret)
		ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
#endif	/* CONFIG_COMPAT */