#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
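/*
 * Illustrative sketch (the real call sites live in kernel/kthread.c and may
 * differ between kernel versions): the two events above are meant to bracket
 * the stop operation, roughly
 *
 *	int kthread_stop(struct task_struct *k)
 *	{
 *		int ret;
 *
 *		trace_sched_kthread_stop(k);
 *		...				// wake the kthread, wait for it to exit
 *		trace_sched_kthread_stop_ret(ret);
 *		return ret;
 *	}
 */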

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->target_cpu	= task_cpu(p);
	)
	TP_perf_assign(
		__perf_task(p);
	),

	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->target_cpu)
);

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));
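/*
 * Illustrative sketch (not a definition in this header): DECLARE_EVENT_CLASS()
 * describes the record layout and format once, and each DEFINE_EVENT() stamps
 * out a tracepoint reusing that class.  A hypothetical extra wakeup event
 * sharing the class would look like
 *
 *	DEFINE_EVENT(sched_wakeup_template, sched_wakeup_example,
 *		     TP_PROTO(struct task_struct *p, int success),
 *		     TP_ARGS(p, success));
 *
 * which is considerably cheaper in generated code than a full TRACE_EVENT()
 * per event.
 */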

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
#endif

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
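/*
 * Illustrative output sketch (field values are made up): with the format
 * string above, a sched_switch record in the ftrace buffer renders roughly as
 *
 *	prev_comm=bash prev_pid=1234 prev_prio=120 prev_state=S ==> next_comm=swapper/0 next_pid=0 next_prio=120
 *
 * prev_state prints as "R" when no state bit below TASK_STATE_MAX is set, and
 * the trailing "+" appears when __trace_sched_switch_state() marked the
 * previous task as preempted via the TASK_STATE_MAX bit.
 */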

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
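/*
 * Illustrative sketch (the real call site is in the scheduler core, not
 * here): the event is expected to fire before the task's CPU is updated, so
 * that task_cpu(p) in TP_fast_assign() still reports the original CPU,
 * roughly
 *
 *	void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 *	{
 *		...
 *		trace_sched_migrate_task(p, new_cpu);	// task_cpu(p) is still the old CPU
 *		...
 *		__set_task_cpu(p, new_cpu);
 *	}
 */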

/*
 * Tracepoint for a CPU going offline/online:
 */
TRACE_EVENT(sched_cpu_hotplug,

	TP_PROTO(int affected_cpu, int error, int status),

	TP_ARGS(affected_cpu, error, status),

	TP_STRUCT__entry(
		__field(	int,	affected_cpu		)
		__field(	int,	error			)
		__field(	int,	status			)
	),

	TP_fast_assign(
		__entry->affected_cpu	= affected_cpu;
		__entry->error		= error;
		__entry->status		= status;
	),

	TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
		__entry->status ? "online" : "offline", __entry->error)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);
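/*
 * Illustrative output sketch (values are made up): a sched_process_fork
 * record renders roughly as
 *
 *	comm=bash pid=1234 child_comm=bash child_pid=1235
 *
 * where the parent fields are printed without a "parent_" prefix, matching
 * the format string above.
 */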

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
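/*
 * Note on the dynamic string above: __string() reserves a variable-length
 * area in the ring-buffer record sized for bprm->filename when the event
 * fires, __assign_str() copies the string into that area, and __get_str()
 * retrieves it for the format string.  A minimal sketch of the same pattern
 * for a hypothetical event with a hypothetical "name" field:
 *
 *	TP_STRUCT__entry(
 *		__string(	name,	some_name	)
 *	),
 *	TP_fast_assign(
 *		__assign_str(name, some_name);
 *	),
 *	TP_printk("name=%s", __get_str(name))
 */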

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);
		__perf_task(tsk);
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
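/*
 * Usage sketch (standard tracepoint naming; assumes the perf tool is
 * available): the sched_stat_* events are exposed under the "sched" trace
 * system defined at the top of this file, so a delay profile can be captured
 * with something like
 *
 *	perf record -e sched:sched_stat_sleep -e sched:sched_stat_blocked -a sleep 10
 *	perf report
 *
 * where __perf_count(delay) in the class above weights each sample by the
 * accumulated delay in nanoseconds.
 */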

/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field( pid_t,	pid	)
		__field( void*, caller	)
		__field( bool, io_wait	)
	),

	TP_fast_assign(
		__entry->pid	= tsk->pid;
		__entry->caller = (void*)get_wchan(tsk);
		__entry->io_wait = tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);
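/*
 * Illustrative output sketch (symbol and values are made up): %pS prints the
 * blocking function as a kernel symbol plus offset, as reported by
 * get_wchan(), e.g.
 *
 *	pid=1234 iowait=1 caller=wait_on_page_bit+0x68/0x80
 */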

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	)
	TP_perf_assign(
		__perf_count(runtime);
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);
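/*
 * Illustrative sketch (the real call site is in the rt-mutex priority
 * boosting code): the event is emitted while tsk->prio still holds the old
 * priority, which is why TP_fast_assign() reads oldprio from the task and
 * only newprio from the argument, roughly
 *
 *	void rt_mutex_setprio(struct task_struct *p, int prio)
 *	{
 *		...
 *		trace_sched_pi_setprio(p, prio);	// before p->prio is changed
 *		...
 *		p->prio = prio;
 *		...
 *	}
 */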

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
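/*
 * Usage sketch (conventional tracepoint plumbing, not specific to this file):
 * exactly one .c file defines CREATE_TRACE_POINTS before including this
 * header, so that <trace/define_trace.h> expands the TRACE_EVENT() macros
 * into real tracepoint definitions and the #ifdef CREATE_TRACE_POINTS helper
 * above is compiled in; every other user simply includes the header and calls
 * the generated trace_<name>() helpers:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/sched.h>
 *
 * in the scheduler core, and plain
 *
 *	#include <trace/events/sched.h>
 *	...
 *	trace_sched_kthread_stop_ret(ret);
 *
 * elsewhere.
 */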