/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
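
/*
 * Illustrative sketch (simplified; the real call sites live in
 * kernel/kthread.c and are not part of this header): kthread_stop()
 * emits both events above, one before waking the kthread so it can
 * stop, and one with the kthread's exit code:
 *
 *	int kthread_stop(struct task_struct *k)
 *	{
 *		int ret;
 *
 *		trace_sched_kthread_stop(k);
 *		...
 *		trace_sched_kthread_stop_ret(ret);
 *		return ret;
 *	}
 */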

/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker: pointer to the kthread_worker
 * @work: pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued (i.e. once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
		__field( void *,	worker)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
		__entry->worker		= worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);
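
/*
 * Illustrative sketch (simplified from kernel/kthread.c, not part of
 * this header): both the immediate and the delayed queueing paths
 * funnel through kthread_insert_work(), which is where the event above
 * is emitted; for delayed work this happens from the timer callback
 * once the delay expires:
 *
 *	static void kthread_insert_work(struct kthread_worker *worker,
 *					struct kthread_work *work,
 *					struct list_head *pos)
 *	{
 *		trace_sched_kthread_work_queue_work(worker, work);
 *		...
 *	}
 */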

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work: pointer to struct kthread_work
 *
 * Allows tracking kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work: pointer to struct kthread_work
 * @function: pointer to worker function
 *
 * Allows tracking kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
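
/*
 * Illustrative sketch (simplified from the wakeup path in
 * kernel/sched/core.c, not part of this header) of how the two events
 * above differ: sched_waking fires inside try_to_wake_up() itself,
 * while sched_wakeup fires once the task has actually been marked
 * runnable, possibly on another CPU for queued remote wakeups:
 *
 *	try_to_wake_up(p, ...)
 *		trace_sched_waking(p);		// always the waker's context
 *		...				// wakeup may be queued remotely
 *		ttwu_do_wakeup(...)
 *			WRITE_ONCE(p->__state, TASK_RUNNING);
 *			trace_sched_wakeup(p);	// not necessarily the waker
 */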

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before using it
	 * as the left-shift count, to recover the original task->state bit.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
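
/*
 * Worked example (for illustration only): a task sleeping in
 * TASK_UNINTERRUPTIBLE (0x0002) has task_state_index() == 2, so the
 * reported value is 1 << (2 - 1) == 0x0002, which sched_switch below
 * prints as "D". TASK_RUNNING stays 0 and prints as "R".
 */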

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
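
/*
 * Example rendered line (illustrative values, wrapped for readability):
 *
 *	prev_comm=kworker/0:1 prev_pid=42 prev_prio=120 prev_state=D ==>
 *	next_comm=swapper/0 next_pid=0 next_prio=120
 *
 * A prev_state of "R+" means the previous task was preempted while
 * runnable (the TASK_REPORT_MAX value returned by
 * __trace_sched_switch_state() above).
 */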

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible
 * sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field( pid_t,	pid	)
		__field( void *, caller	)
		__field( bool,	io_wait	)
	),

	TP_fast_assign(
		__entry->pid	= tsk->pid;
		__entry->caller	= (void *)get_wchan(tsk);
		__entry->io_wait = tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);
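
/*
 * Worked example (illustrative values): a SCHED_NORMAL task with
 * normal_prio 120 that is boosted by an RT pi_task with prio 98
 * reports newprio = min(120, 98) = 98; when the boost is released
 * (pi_task == NULL), newprio falls back to normal_prio, i.e. 120.
 */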

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid	= dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid	= dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * They are postfixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));
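
/*
 * Illustrative sketch (not part of this header): a debugging module can
 * hook one of the bare tracepoints above via the register_trace_<name>()
 * helper that DECLARE_TRACE() generates. The probe's first argument is
 * the private data pointer passed at registration time:
 *
 *	static void probe_nr_running(void *data, struct rq *rq, int change)
 *	{
 *		trace_printk("nr_running changed by %d\n", change);
 *	}
 *
 *	register_trace_sched_update_nr_running_tp(probe_nr_running, NULL);
 *	...
 *	unregister_trace_sched_update_nr_running_tp(probe_nr_running, NULL);
 */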

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>