/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int,	ret )
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
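
/*
 * A minimal usage sketch tying the two events above together; it is
 * illustrative only (my_thread_fn and task are not real kernel symbols):
 *
 *	struct task_struct *task;
 *	int ret;
 *
 *	task = kthread_run(my_thread_fn, NULL, "my-kthread");
 *	...
 *	ret = kthread_stop(task);
 *
 * kthread_stop() emits sched_kthread_stop(task) on entry and
 * sched_kthread_stop_ret(ret) with its return value on exit.
 */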

/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker: pointer to the kthread_worker
 * @work: pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately, or once a
 * delayed work is actually queued (i.e. once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *,	work )
		__field( void *,	function )
		__field( void *,	worker )
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
		__entry->worker		= worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);
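
/*
 * A hedged usage sketch for the queueing path above; the worker and work
 * names are illustrative, the kthread_* calls are the standard
 * kthread_worker API:
 *
 *	static void my_work_fn(struct kthread_work *work) { ... }
 *
 *	struct kthread_worker *worker;
 *	struct kthread_work my_work;
 *
 *	worker = kthread_create_worker(0, "my-worker");
 *	kthread_init_work(&my_work, my_work_fn);
 *	kthread_queue_work(worker, &my_work);
 *
 * The event fires inside kthread_queue_work(); for
 * kthread_queue_delayed_work() it fires only once the timer expires and
 * the work is really queued.
 */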

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work: pointer to struct kthread_work
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *,	work )
		__field( void *,	function )
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work: pointer to struct kthread_work
 * @function: pointer to worker function
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *,	work )
		__field( void *,	function )
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
		__field( int,	prio )
		__field( int,	target_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
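
/*
 * A minimal probe sketch for one event of this class, assuming the caller
 * is in-kernel (or the tracepoint is exported to the module); the probe
 * name is illustrative. The leading void * is the private data pointer
 * supplied at registration time:
 *
 *	static void probe_sched_waking(void *data, struct task_struct *p)
 *	{
 *		pr_debug("waking comm=%s pid=%d\n", p->comm, p->pid);
 *	}
 *
 *	register_trace_sched_waking(probe_sched_waking, NULL);
 */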

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt,
					      unsigned int prev_state,
					      struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __task_state_index() uses fls() and returns a value in the 0-8
	 * range. Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before
	 * using it in a left shift, to recover the original task->state
	 * bit.
	 */
	state = __task_state_index(prev_state, p->exit_state);

	return state ? (1 << (state - 1)) : state;
}
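
/*
 * Worked example of the mapping above: prev_state = TASK_UNINTERRUPTIBLE
 * (0x0002) yields __task_state_index() == 2, so we return 1 << (2 - 1) ==
 * 0x0002, which sched_switch below prints as "D". TASK_RUNNING (0) stays 0
 * and prints as "R"; a preempted task reports TASK_REPORT_MAX, rendered as
 * the "+" suffix.
 */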
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__array( char,	prev_comm,	TASK_COMM_LEN )
		__field( pid_t,	prev_pid )
		__field( int,	prev_prio )
		__field( long,	prev_state )
		__array( char,	next_comm,	TASK_COMM_LEN )
		__field( pid_t,	next_pid )
		__field( int,	next_prio )
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
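
/*
 * A hedged probe sketch for sched_switch: the probe signature mirrors
 * TP_PROTO with a leading data pointer; probe_sched_switch is illustrative:
 *
 *	static void probe_sched_switch(void *data, bool preempt,
 *				       struct task_struct *prev,
 *				       struct task_struct *next,
 *				       unsigned int prev_state)
 *	{
 *		pr_debug("%s[%d] -> %s[%d]\n",
 *			 prev->comm, prev->pid, next->comm, next->pid);
 *	}
 *
 *	register_trace_sched_switch(probe_sched_switch, NULL);
 */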

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
		__field( int,	prio )
		__field( int,	orig_cpu )
		__field( int,	dest_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
		__field( int,	prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid	= p->pid;
		__entry->prio	= p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
		__field( int,	prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid	= pid_nr(pid);
		__entry->prio	= current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array( char,	parent_comm,	TASK_COMM_LEN )
		__field( pid_t,	parent_pid )
		__array( char,	child_comm,	TASK_COMM_LEN )
		__field( pid_t,	child_pid )
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string( filename,	bprm->filename )
		__field( pid_t,		pid )
		__field( pid_t,		old_pid )
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
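
/*
 * Example output (field values illustrative):
 *
 *	filename=/usr/bin/ls pid=1234 old_pid=1234
 *
 * pid and old_pid differ only when a non-leader thread calls exec and
 * takes over the thread-group leader's pid.
 */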

#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
		__field( u64,	delay )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
		       TP_PROTO(struct task_struct *tsk, u64 delay),
		       TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
		       TP_PROTO(struct task_struct *tsk, u64 delay),
		       TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
		       TP_PROTO(struct task_struct *tsk, u64 delay),
		       TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
		       TP_PROTO(struct task_struct *tsk, u64 delay),
		       TP_ARGS(tsk, delay));

/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field( pid_t,		pid )
		__field( void *,	caller )
		__field( bool,		io_wait )
	),

	TP_fast_assign(
		__entry->pid		= tsk->pid;
		__entry->caller		= (void *)__get_wchan(tsk);
		__entry->io_wait	= tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
		__field( u64,	runtime )
		__field( u64,	vruntime )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
		__field( int,	oldprio )
		__field( int,	newprio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);
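
/*
 * Worked example of the newprio computation above (values illustrative):
 * a SCHED_NORMAL task with normal_prio 120, boosted by a SCHED_FIFO top
 * waiter with prio 98, gets newprio = min(120, 98) = 98 (lower value means
 * higher priority). When pi_task is NULL the boost is gone and newprio
 * falls back to normal_prio.
 */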

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN )
		__field( pid_t,	pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid )
		__field( pid_t,	tgid )
		__field( pid_t,	ngid )
		__field( int,	src_cpu )
		__field( int,	src_nid )
		__field( int,	dst_cpu )
		__field( int,	dst_nid )
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
		  __entry->pid, __entry->tgid, __entry->ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid )
		__field( pid_t,	src_tgid )
		__field( pid_t,	src_ngid )
		__field( int,	src_cpu )
		__field( int,	src_nid )
		__field( pid_t,	dst_pid )
		__field( pid_t,	dst_tgid )
		__field( pid_t,	dst_ngid )
		__field( int,	dst_cpu )
		__field( int,	dst_nid )
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid	= dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid	= dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
		  __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
		  __entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field( int,	cpu )
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes; a registration sketch
 * follows the list below.
 *
 * Postfixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));
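
/*
 * A minimal module-side sketch for hooking one of the bare tracepoints
 * above, assuming it is exported with EXPORT_TRACEPOINT_SYMBOL_GPL (as the
 * scheduler does for these in kernel/sched/core.c); the probe name is
 * illustrative:
 *
 *	static void probe_overutilized(void *data, struct root_domain *rd,
 *				       bool overutilized)
 *	{
 *		trace_printk("overutilized=%d\n", overutilized);
 *	}
 *
 *	register_trace_sched_overutilized_tp(probe_overutilized, NULL);
 *	...
 *	unregister_trace_sched_overutilized_tp(probe_overutilized, NULL);
 */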

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>