/* SPDX-License-Identifier: GPL-2.0 */

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#include <linux/sched/numa_balancing.h>
#include <linux/sched/clock.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

#ifdef CONFIG_SCHED_RT_CAS
#include "eas_sched.h"
#endif

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

            TP_PROTO(struct task_struct *t),

            TP_ARGS(t),

            TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid)),

            TP_fast_assign(memcpy(__entry->comm, t->comm, TASK_COMM_LEN); __entry->pid = t->pid;),

            TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid));

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

            TP_PROTO(int ret),

            TP_ARGS(ret),

            TP_STRUCT__entry(__field(int, ret)),

            TP_fast_assign(__entry->ret = ret;),

            TP_printk("ret=%d", __entry->ret));

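/*
 * Example (illustrative, not part of this header): kthread_stop() in
 * kernel/kthread.c emits the pair of events above around the stop
 * operation, roughly:
 *
 *	trace_sched_kthread_stop(k);
 *	... wake the kthread and wait for it to exit ...
 *	trace_sched_kthread_stop_ret(ret);
 */
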
/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

                    TP_PROTO(struct task_struct *p),

                    TP_ARGS(__perf_task(p)),

                    TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid) __field(int, prio)
                                         __field(int, success) __field(int, target_cpu)),

                    TP_fast_assign(memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid;
                                   __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
                                   __entry->success = 1;    /* rudiment, kill when possible */
                                   __entry->target_cpu = task_cpu(p);),

                    TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio,
                              __entry->target_cpu));

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking, TP_PROTO(struct task_struct *p), TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup, TP_PROTO(struct task_struct *p), TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, TP_PROTO(struct task_struct *p), TP_ARGS(p));

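/*
 * Example (illustrative, not part of this header): a kernel module can
 * attach a probe to one of these events with the generated register
 * helpers; the first probe argument is the private data pointer passed at
 * registration:
 *
 *	static void my_wakeup_probe(void *data, struct task_struct *p)
 *	{
 *		pr_info("woken: %s (%d)\n", p->comm, p->pid);
 *	}
 *
 *	register_trace_sched_wakeup(my_wakeup_probe, NULL);
 *	...
 *	unregister_trace_sched_wakeup(my_wakeup_probe, NULL);
 */
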
#ifdef CREATE_TRACE_POINTS
static inline long trace_sched_switch_state(bool preempt, struct task_struct *p)
{
    unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
    BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

    /*
     * Preemption ignores task state, therefore preempted tasks are always
     * RUNNING (we will not have dequeued if state != RUNNING).
     */
    if (preempt) {
        return TASK_REPORT_MAX;
    }

    /*
     * task_state_index() uses fls() and returns a value in the 0-8 range.
     * Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before using it
     * as a left-shift count to recover the correct task->state mapping.
     */
    state = task_state_index(p);

    return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

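/*
 * Worked example: for a task in TASK_UNINTERRUPTIBLE (0x2),
 * task_state_index() returns 2, so the helper reports 1 << (2 - 1) == 0x2
 * and recovers the original state bit; TASK_RUNNING maps to index 0 and is
 * returned unchanged.
 */
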
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

            TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next),

            TP_ARGS(preempt, prev, next),

            TP_STRUCT__entry(__array(char, prev_comm, TASK_COMM_LEN) __field(pid_t, prev_pid) __field(int, prev_prio)
                                 __field(long, prev_state) __array(char, next_comm, TASK_COMM_LEN)
                                 __field(pid_t, next_pid) __field(int, next_prio)),

            TP_fast_assign(memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); __entry->prev_pid = prev->pid;
                           __entry->prev_prio = prev->prio;
                           __entry->prev_state = trace_sched_switch_state(preempt, prev);
                           memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); __entry->next_pid = next->pid;
                           __entry->next_prio = next->prio;
                           /* XXX SCHED_DEADLINE */
                           ),

            TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
                      __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

                      (__entry->prev_state & (TASK_REPORT_MAX - 1))
                          ? __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|", {TASK_INTERRUPTIBLE, "S"},
                                          {TASK_UNINTERRUPTIBLE, "D"}, {__TASK_STOPPED, "T"}, {__TASK_TRACED, "t"},
                                          {EXIT_DEAD, "X"}, {EXIT_ZOMBIE, "Z"}, {TASK_PARKED, "P"}, {TASK_DEAD, "I"})
                          : "R",

                      __entry->prev_state & TASK_REPORT_MAX ? "+" : "", __entry->next_comm, __entry->next_pid,
                      __entry->next_prio));

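/*
 * Example output (illustrative): a task that voluntarily sleeps prints
 * prev_state=S, an exiting task prints prev_state=Z, and a preempted task
 * prints prev_state=R+ where the "+" is the TASK_REPORT_MAX marker set by
 * trace_sched_switch_state() above.
 */
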
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

            TP_PROTO(struct task_struct *p, int dest_cpu),

            TP_ARGS(p, dest_cpu),

            TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid) __field(int, prio)
                                 __field(int, orig_cpu) __field(int, dest_cpu) __field(int, running)),

            TP_fast_assign(memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid;
                           __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
                           __entry->orig_cpu = task_cpu(p); __entry->dest_cpu = dest_cpu;
                           __entry->running = (p->state == TASK_RUNNING);),

            TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d running=%d", __entry->comm, __entry->pid,
                      __entry->prio, __entry->orig_cpu, __entry->dest_cpu, __entry->running));

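/*
 * Usage note (illustrative): with tracing enabled, this event can be
 * switched on from user space via tracefs, e.g. by writing 1 to
 * events/sched/sched_migrate_task/enable under /sys/kernel/tracing;
 * records then show up in the "trace" file in the TP_printk() format above.
 */
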
DECLARE_EVENT_CLASS(sched_process_template,

                    TP_PROTO(struct task_struct *p),

                    TP_ARGS(p),

                    TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid) __field(int, prio)),

                    TP_fast_assign(memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid;
                                   __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
                                   ),

                    TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio));

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free, TP_PROTO(struct task_struct *p), TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit, TP_PROTO(struct task_struct *p), TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task, TP_PROTO(struct task_struct *p), TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

            TP_PROTO(struct pid *pid),

            TP_ARGS(pid),

            TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid) __field(int, prio)),

            TP_fast_assign(memcpy(__entry->comm, current->comm, TASK_COMM_LEN); __entry->pid = pid_nr(pid);
                           __entry->prio = current->prio; /* XXX SCHED_DEADLINE */
                           ),

            TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio));

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

            TP_PROTO(struct task_struct *parent, struct task_struct *child),

            TP_ARGS(parent, child),

            TP_STRUCT__entry(__array(char, parent_comm, TASK_COMM_LEN) __field(pid_t, parent_pid)
                                 __array(char, child_comm, TASK_COMM_LEN) __field(pid_t, child_pid)),

            TP_fast_assign(memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN); __entry->parent_pid = parent->pid;
                           memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN); __entry->child_pid = child->pid;),

            TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d", __entry->parent_comm, __entry->parent_pid,
                      __entry->child_comm, __entry->child_pid));

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

            TP_PROTO(struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm),

            TP_ARGS(p, old_pid, bprm),

            TP_STRUCT__entry(__string(filename, bprm->filename) __field(pid_t, pid) __field(pid_t, old_pid)),

            TP_fast_assign(__assign_str(filename, bprm->filename); __entry->pid = p->pid; __entry->old_pid = old_pid;),

            TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename), __entry->pid, __entry->old_pid));

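/*
 * Note: __string() reserves a variable-length slot for the filename in the
 * ring-buffer record, __assign_str() copies bprm->filename into that slot,
 * and __get_str() fetches it at print time, so the event stores the path
 * by value rather than as a kernel pointer.
 */
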
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

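/*
 * Note: with CONFIG_SCHEDSTATS disabled, the _NOP variants still generate
 * the trace_*() call signatures as empty static inlines, roughly:
 *
 *	static inline void trace_sched_stat_wait(struct task_struct *tsk,
 *						 u64 delay) { }
 *
 * so call sites compile unchanged without any #ifdef.
 */
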
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(
    sched_stat_template,

    TP_PROTO(struct task_struct *tsk, u64 delay),

    TP_ARGS(__perf_task(tsk), __perf_count(delay)),

    TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid) __field(u64, delay)),

    TP_fast_assign(memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid; __entry->delay = delay;),

    TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay));

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait, TP_PROTO(struct task_struct *tsk, u64 delay),
                       TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep, TP_PROTO(struct task_struct *tsk, u64 delay),
                       TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait, TP_PROTO(struct task_struct *tsk, u64 delay),
                       TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked, TP_PROTO(struct task_struct *tsk, u64 delay),
                       TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

                    TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

                    TP_ARGS(tsk, __perf_count(runtime), vruntime),

                    TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid) __field(u64, runtime)
                                         __field(u64, vruntime)),

                    TP_fast_assign(memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid;
                                   __entry->runtime = runtime; __entry->vruntime = vruntime;),

                    TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]", __entry->comm, __entry->pid,
                              (unsigned long long)__entry->runtime, (unsigned long long)__entry->vruntime));

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime, TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
             TP_ARGS(tsk, runtime, vruntime));

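/*
 * Note (illustrative): __perf_count(runtime) marks runtime as the value
 * perf uses as the sample count for this event, and __perf_task() (used by
 * the wakeup and sched_stat classes above) directs the perf event to the
 * target task's context; both are transparent no-ops for ordinary ftrace
 * output.
 */
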
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

            TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

            TP_ARGS(tsk, pi_task),

            TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid) __field(int, oldprio)
                                 __field(int, newprio)),

            TP_fast_assign(memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid;
                           __entry->oldprio = tsk->prio;
                           __entry->newprio = pi_task ? min(tsk->normal_prio, pi_task->prio) : tsk->normal_prio;
                           /* XXX SCHED_DEADLINE bits missing */
                           ),

            TP_printk("comm=%s pid=%d oldprio=%d newprio=%d", __entry->comm, __entry->pid, __entry->oldprio,
                      __entry->newprio));

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang, TP_PROTO(struct task_struct *tsk), TP_ARGS(tsk),

            TP_STRUCT__entry(__array(char, comm, TASK_COMM_LEN) __field(pid_t, pid)),

            TP_fast_assign(memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid;),

            TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid));
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

            TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

            TP_ARGS(tsk, src_cpu, dst_cpu),

            TP_STRUCT__entry(__field(pid_t, pid) __field(pid_t, tgid) __field(pid_t, ngid) __field(int, src_cpu)
                                 __field(int, src_nid) __field(int, dst_cpu) __field(int, dst_nid)),

            TP_fast_assign(__entry->pid = task_pid_nr(tsk); __entry->tgid = task_tgid_nr(tsk);
                           __entry->ngid = task_numa_group_id(tsk); __entry->src_cpu = src_cpu;
                           __entry->src_nid = cpu_to_node(src_cpu); __entry->dst_cpu = dst_cpu;
                           __entry->dst_nid = cpu_to_node(dst_cpu);),

            TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d", __entry->pid, __entry->tgid,
                      __entry->ngid, __entry->src_cpu, __entry->src_nid, __entry->dst_cpu, __entry->dst_nid));

DECLARE_EVENT_CLASS(
    sched_numa_pair_template,

    TP_PROTO(struct task_struct *src_tsk, int src_cpu, struct task_struct *dst_tsk, int dst_cpu),

    TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

    TP_STRUCT__entry(__field(pid_t, src_pid) __field(pid_t, src_tgid) __field(pid_t, src_ngid) __field(int, src_cpu)
                         __field(int, src_nid) __field(pid_t, dst_pid) __field(pid_t, dst_tgid) __field(pid_t, dst_ngid)
                         __field(int, dst_cpu) __field(int, dst_nid)),

    TP_fast_assign(__entry->src_pid = task_pid_nr(src_tsk); __entry->src_tgid = task_tgid_nr(src_tsk);
                   __entry->src_ngid = task_numa_group_id(src_tsk); __entry->src_cpu = src_cpu;
                   __entry->src_nid = cpu_to_node(src_cpu); __entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
                   __entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
                   __entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0; __entry->dst_cpu = dst_cpu;
                   __entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;),

    TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d "
              "dst_nid=%d",
              __entry->src_pid, __entry->src_tgid, __entry->src_ngid, __entry->src_cpu, __entry->src_nid,
              __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid, __entry->dst_cpu, __entry->dst_nid));

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

             TP_PROTO(struct task_struct *src_tsk, int src_cpu, struct task_struct *dst_tsk, int dst_cpu),

             TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu));

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

             TP_PROTO(struct task_struct *src_tsk, int src_cpu, struct task_struct *dst_tsk, int dst_cpu),

             TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu));

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

            TP_PROTO(int cpu),

            TP_ARGS(cpu),

            TP_STRUCT__entry(__field(int, cpu)),

            TP_fast_assign(__entry->cpu = cpu;),

            TP_printk("cpu=%d", __entry->cpu));

#ifdef CONFIG_SCHED_CORE_CTRL
TRACE_EVENT(core_ctl_eval_need,

            TP_PROTO(unsigned int cpu, unsigned int old_need, unsigned int new_need, unsigned int updated),
            TP_ARGS(cpu, old_need, new_need, updated),
            TP_STRUCT__entry(__field(u32, cpu) __field(u32, old_need) __field(u32, new_need) __field(u32, updated)),
            TP_fast_assign(__entry->cpu = cpu; __entry->old_need = old_need; __entry->new_need = new_need;
                           __entry->updated = updated;),
            TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu, __entry->old_need,
                      __entry->new_need, __entry->updated));

TRACE_EVENT(core_ctl_set_busy,

            TP_PROTO(unsigned int cpu, unsigned int busy, unsigned int old_is_busy, unsigned int is_busy,
                     int high_irqload),
            TP_ARGS(cpu, busy, old_is_busy, is_busy, high_irqload),
            TP_STRUCT__entry(__field(u32, cpu) __field(u32, busy) __field(u32, old_is_busy) __field(u32, is_busy)
                                 __field(bool, high_irqload)),
            TP_fast_assign(__entry->cpu = cpu; __entry->busy = busy; __entry->old_is_busy = old_is_busy;
                           __entry->is_busy = is_busy; __entry->high_irqload = high_irqload;),
            TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u high_irqload=%d", __entry->cpu, __entry->busy,
                      __entry->old_is_busy, __entry->is_busy, __entry->high_irqload));

TRACE_EVENT(core_ctl_set_boost,

            TP_PROTO(u32 refcount, s32 ret), TP_ARGS(refcount, ret),
            TP_STRUCT__entry(__field(u32, refcount) __field(s32, ret)),
            TP_fast_assign(__entry->refcount = refcount; __entry->ret = ret;),
            TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret));

TRACE_EVENT(core_ctl_update_nr_need,

            TP_PROTO(int cpu, int nr_need, int prev_misfit_need, int nrrun, int max_nr, int nr_prev_assist),

            TP_ARGS(cpu, nr_need, prev_misfit_need, nrrun, max_nr, nr_prev_assist),

            TP_STRUCT__entry(__field(int, cpu) __field(int, nr_need) __field(int, prev_misfit_need) __field(int, nrrun)
                                 __field(int, max_nr) __field(int, nr_prev_assist)),

            TP_fast_assign(__entry->cpu = cpu; __entry->nr_need = nr_need; __entry->prev_misfit_need = prev_misfit_need;
                           __entry->nrrun = nrrun; __entry->max_nr = max_nr; __entry->nr_prev_assist = nr_prev_assist;),

            TP_printk("cpu=%d nr_need=%d prev_misfit_need=%d nrrun=%d max_nr=%d nr_prev_assist=%d", __entry->cpu,
                      __entry->nr_need, __entry->prev_misfit_need, __entry->nrrun, __entry->max_nr,
                      __entry->nr_prev_assist));
#endif

#ifdef CONFIG_SCHED_RUNNING_AVG
/*
 * Tracepoint for sched_get_nr_running_avg:
 */
TRACE_EVENT(sched_get_nr_running_avg,

            TP_PROTO(int cpu, int nr, int nr_misfit, int nr_max),

            TP_ARGS(cpu, nr, nr_misfit, nr_max),

            TP_STRUCT__entry(__field(int, cpu) __field(int, nr) __field(int, nr_misfit) __field(int, nr_max)),

            TP_fast_assign(__entry->cpu = cpu; __entry->nr = nr; __entry->nr_misfit = nr_misfit;
                           __entry->nr_max = nr_max;),

            TP_printk("cpu=%d nr=%d nr_misfit=%d nr_max=%d", __entry->cpu, __entry->nr, __entry->nr_misfit,
                      __entry->nr_max));
#endif

#ifdef CONFIG_CPU_ISOLATION_OPT
/*
 * sched_isolate - called when cores are isolated/unisolated
 *
 * @requested_cpu: the cpu requested for isolation/unisolation
 * @isolated_cpus: mask of currently isolated cpus
 * @start_time: timestamp of the start of the operation, used to report the
 *              time in us it took to isolate/unisolate
 * @isolate: 1 if isolating, 0 if unisolating
 */
TRACE_EVENT(sched_isolate,

            TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus, u64 start_time, unsigned char isolate),

            TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate),

            TP_STRUCT__entry(__field(u32, requested_cpu) __field(u32, isolated_cpus) __field(u32, time)
                                 __field(unsigned char, isolate)),

            TP_fast_assign(__entry->requested_cpu = requested_cpu; __entry->isolated_cpus = isolated_cpus;
                           __entry->time = div64_u64(sched_clock() - start_time, 1000); __entry->isolate = isolate;),

            TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d", __entry->requested_cpu, __entry->isolated_cpus,
                      __entry->time, __entry->isolate));
#endif

/*
 * Following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 *
 * Postfixed with _tp to make them easily identifiable in the code.
 */
DECLARE_TRACE(pelt_cfs_tp, TP_PROTO(struct cfs_rq *cfs_rq), TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp, TP_PROTO(struct sched_entity *se), TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp, TP_PROTO(struct root_domain *rd, bool overutilized), TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp, TP_PROTO(struct cfs_rq *cfs_rq), TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp, TP_PROTO(struct sched_entity *se), TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp, TP_PROTO(struct rq *rq, int change), TP_ARGS(rq, change));

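/*
 * Example (illustrative, not part of this header): since these bare
 * tracepoints have no tracefs entry, a testing/debugging module hooks them
 * directly:
 *
 *	static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		pr_debug("cfs_rq PELT signal updated\n");
 *	}
 *
 *	register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
 */
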
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
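
/*
 * Usage note (illustrative): exactly one .c file per trace system defines
 * CREATE_TRACE_POINTS before including this header, which makes
 * <trace/define_trace.h> expand the events above into real tracepoint
 * definitions (this is also why the include guard allows a second pass via
 * TRACE_HEADER_MULTI_READ); every other includer only sees the trace_*()
 * declarations:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/sched.h>
 */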