#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>		/* For unlikely.  */
#include <linux/sched.h>		/* For struct task_struct.  */
#include <linux/err.h>			/* For IS_ERR_VALUE.  */
#include <linux/bug.h>			/* For BUG_ON.  */
#include <linux/pid_namespace.h>	/* For task_active_pid_ns.  */
#include <uapi/linux/ptrace.h>

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace
 * flags, are simple.  When a task is running it owns its task->ptrace
 * flags.  When a task is stopped the ptracer owns task->ptrace.
 */

#define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
#define PT_PTRACED	0x00000001
#define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
#define PT_PTRACE_CAP	0x00000004	/* ptracer can follow suid-exec */

#define PT_OPT_FLAG_SHIFT	3
/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG(event)	(1 << (PT_OPT_FLAG_SHIFT + (event)))
#define PT_TRACESYSGOOD		PT_EVENT_FLAG(0)
#define PT_TRACE_FORK		PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK		PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE		PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC		PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE	PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT		PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_SECCOMP	PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)

#define PT_EXITKILL		(PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP	(PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)

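/*
 * Worked example (a sketch, not part of the original header): the PT_TRACE_*
 * bits are the user-visible PTRACE_O_* option bits shifted left by
 * PT_OPT_FLAG_SHIFT.  With PTRACE_EVENT_FORK == 1 and
 * PTRACE_O_TRACEFORK == (1 << PTRACE_EVENT_FORK):
 *
 *	PT_TRACE_FORK == 1 << (PT_OPT_FLAG_SHIFT + 1)
 *		      == PTRACE_O_TRACEFORK << PT_OPT_FLAG_SHIFT
 *
 * A hypothetical compile-time check of that correspondence would be:
 *
 *	BUILD_BUG_ON(PT_TRACE_FORK !=
 *		     (PTRACE_O_TRACEFORK << PT_OPT_FLAG_SHIFT));
 */
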
/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT	31
#define PT_SINGLESTEP		(1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT	30
#define PT_BLOCKSTEP		(1<<PT_BLOCKSTEP_BIT)

extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);

#define PTRACE_MODE_READ	0x01
#define PTRACE_MODE_ATTACH	0x02
#define PTRACE_MODE_NOAUDIT	0x04

/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);

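/*
 * Illustrative sketch (assumed caller, not part of this header): code that
 * exposes another task's state typically gates it on ptrace_may_access().
 * A hypothetical procfs-style read handler might do:
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ))
 *		return -EPERM;
 *	... proceed to read @task's state ...
 */
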
static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be
 * kept live only by RCU.  During exec, this may be called with task_lock()
 * held on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}

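/*
 * Usage sketch (assumed caller, not from this header): since the returned
 * pointer may be kept live only by RCU, a reader brackets the call with
 * rcu_read_lock():
 *
 *	rcu_read_lock();
 *	tracer = ptrace_parent(task);
 *	if (tracer)
 *		tracer_pid = task_pid_nr(tracer);
 *	rcu_read_unlock();
 *
 * The tracer pointer must not be dereferenced after rcu_read_unlock().
 */
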
/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event:	%PTRACE_EVENT_* value to report
 * @message:	value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	} else if (event == PTRACE_EVENT_EXEC) {
		/* legacy EXEC report via SIGTRAP */
		if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
			send_sig(SIGTRAP, current, 0);
	}
}

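/*
 * Usage sketch: the exit path reports its event roughly like this (the
 * message is the exit code the tracer will read via PTRACE_GETEVENTMSG):
 *
 *	ptrace_event(PTRACE_EVENT_EXIT, code);
 *
 * When PT_TRACE_EXIT is not set, the call is a cheap no-op for that event.
 */
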
/**
 * ptrace_event_pid - possibly stop for a ptrace event notification
 * @event:	%PTRACE_EVENT_* value to report
 * @pid:	process identifier for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @pid
 * to the ptrace parent.  @pid is reported as the pid_t seen from the
 * ptrace parent's pid namespace.
 *
 * Called without locks.
 */
static inline void ptrace_event_pid(int event, struct pid *pid)
{
	/*
	 * FIXME: There's a potential race if a ptracer in a different pid
	 * namespace than parent attaches between computing message below and
	 * when we acquire tasklist_lock in ptrace_stop().  If this happens,
	 * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
	 */
	unsigned long message = 0;
	struct pid_namespace *ns;

	rcu_read_lock();
	ns = task_active_pid_ns(rcu_dereference(current->parent));
	if (ns)
		message = pid_nr_ns(pid, ns);
	rcu_read_unlock();

	ptrace_event(event, message);
}

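/*
 * Usage sketch (simplified from the fork paths): after a new child is
 * created, the parent reports the child's pid translated into the tracer's
 * namespace:
 *
 *	ptrace_event_pid(PTRACE_EVENT_FORK, pid);
 *
 * where @pid is the struct pid of the new child (an illustrative variable
 * name, not defined here).
 */
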
/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child:		new child task
 * @ptrace:		true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);

		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}

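/*
 * Usage sketch: the fork path calls this for every new task, passing true
 * only when the clone request or an active tracer asks for auto-attach,
 * roughly:
 *
 *	ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 *
 * (paraphrased from copy_process(); variable names are illustrative).
 */
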
/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task:	task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif

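/*
 * Illustrative sketch (hypothetical handler, not from this header): a
 * syscall whose successful return value can look like an error code calls
 * the macro just before returning, so the arch does not set its error flag:
 *
 *	long example_syscall(void)
 *	{
 *		long val = compute_value();	(may be negative yet valid)
 *		force_successful_syscall_return();
 *		return val;
 *	}
 *
 * example_syscall() and compute_value() are made-up names for illustration.
 */
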
#ifndef is_syscall_success
/*
 * On most systems we can tell if a syscall is a success based on whether the
 * return value is an error value.  Some systems, like ia64 and powerpc, have
 * different indicators of success/failure and must define their own.
 */
#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
#endif

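/*
 * Usage sketch (hypothetical tracing hook): on the syscall-exit path a
 * tracer can classify the result with the default definition:
 *
 *	if (is_syscall_success(regs))
 *		audit_ok(regs_return_value(regs));
 *	else
 *		audit_fail(regs_return_value(regs));
 *
 * audit_ok()/audit_fail() are placeholders; regs_return_value() is the
 * arch helper the macro itself relies on.
 */
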
/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step()		(0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif	/* arch_has_single_step */

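/*
 * Usage sketch (paraphrasing the generic resume logic): an arch-neutral
 * caller only enables stepping when the arch reports support:
 *
 *	if (request == PTRACE_SINGLESTEP) {
 *		if (!arch_has_single_step())
 *			return -EIO;
 *		user_enable_single_step(child);
 *	} else {
 *		user_disable_single_step(child);
 *	}
 *
 * This mirrors, in simplified form, how ptrace_resume() guards the calls.
 */
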
#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step()		(0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG();			/* This can never be called.  */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif	/* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 *
 * This is guaranteed to be invoked once before a task stops for ptrace and
 * may include arch-specific operations necessary prior to a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code:	current->exit_code value ptrace will stop with
 * @info:	siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif

#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif

#ifndef ptrace_signal_deliver
#define ptrace_signal_deliver() ((void)0)
#endif

/*
 * Unlike current_pt_regs(), this one is equal to task_pt_regs(current)
 * on *all* architectures; the only reason to have a per-arch definition
 * is optimisation.
 */
#ifndef signal_pt_regs
#define signal_pt_regs() task_pt_regs(current)
#endif

#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);

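/*
 * Usage sketch (assumed caller, in the style of /proc/<pid>/syscall): the
 * target is expected to be stopped or blocked; on success, @callno is -1
 * when the task is not inside a syscall:
 *
 *	long nr;
 *	unsigned long args[6], sp, pc;
 *
 *	if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0 &&
 *	    nr != -1L)
 *		... @task is blocked in syscall nr ...
 */
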
#endif