/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM walt
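/*
 * WALT (Window Assisted Load Tracking) scheduler tracepoints.
 *
 * A single translation unit instantiates these events by defining
 * CREATE_TRACE_POINTS before including this header, e.g. (illustrative,
 * assuming the default <trace/events/> include path implied by
 * TRACE_SYSTEM):
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/walt.h>
 */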

#if !defined(_TRACE_WALT_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WALT_H

#include <linux/trace_seq.h>
#include <linux/tracepoint.h>

struct rq;
extern const char *task_event_names[];

#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_WALT)
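/*
 * The helpers below are needed only while the tracepoint bodies are
 * being instantiated (CREATE_TRACE_POINTS) with WALT enabled.
 *
 * __window_data() snapshots a task's per-CPU window contributions into
 * an event's dynamic array; a NULL @src yields an all-zero snapshot.
 */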
static inline void __window_data(u32 *dst, u32 *src)
{
	if (src)
		memcpy(dst, src, nr_cpu_ids * sizeof(u32));
	else
		memset(dst, 0, nr_cpu_ids * sizeof(u32));
}

struct trace_seq;
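/*
 * Render @buf as a space-separated, NUL-terminated list of window sums
 * directly into the trace_seq buffer and return a pointer to the
 * rendered string, so TP_printk() can consume it via "%s".
 */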
static inline
const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len)
{
	int i;
	const char *ret = p->buffer + seq_buf_used(&p->seq);

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%u ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}

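/*
 * Select one of the rq's four runnable sums: current vs. previous
 * window (@curr), and new-task (nt_*) vs. all-task accounting (@new).
 */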
static inline s64 __rq_update_sum(struct rq *rq, bool curr, bool new)
{
	if (curr) {
		if (new)
			return rq->nt_curr_runnable_sum;
		else
			return rq->curr_runnable_sum;
	} else {
		if (new)
			return rq->nt_prev_runnable_sum;
		else
			return rq->prev_runnable_sum;
	}
}

#ifdef CONFIG_SCHED_RTG
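/* As __rq_update_sum(), but for the related thread group's sums. */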
static inline s64 __grp_update_sum(struct rq *rq, bool curr, bool new)
{
	if (curr) {
		if (new)
			return rq->grp_time.nt_curr_runnable_sum;
		else
			return rq->grp_time.curr_runnable_sum;
	} else {
		if (new)
			return rq->grp_time.nt_prev_runnable_sum;
		else
			return rq->grp_time.prev_runnable_sum;
	}
}

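/*
 * For a migration between an rq and its related thread group, pick the
 * requested runnable sum. @src selects the side the accounting leaves
 * (the rq for RQ_TO_GROUP, the group for GROUP_TO_RQ); @new and @curr
 * select among the four sums as in __rq_update_sum().
 */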
static inline s64
__get_update_sum(struct rq *rq, enum migrate_types migrate_type,
		 bool src, bool new, bool curr)
{
	switch (migrate_type) {
	case RQ_TO_GROUP:
		if (src)
			return __rq_update_sum(rq, curr, new);
		else
			return __grp_update_sum(rq, curr, new);
	case GROUP_TO_RQ:
		if (src)
			return __grp_update_sum(rq, curr, new);
		else
			return __rq_update_sum(rq, curr, new);
	default:
		WARN_ON_ONCE(1);
		return -1;
	}
}
#endif /* CONFIG_SCHED_RTG */
#endif /* CREATE_TRACE_POINTS && CONFIG_SCHED_WALT */

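/*
 * sched_update_history: records a task's updated demand together with
 * the sliding history of busy-time samples (ravg.sum_history) after
 * WALT folds completed windows into the history.
 *
 * Emitted through the generated stub, e.g. (illustrative call site):
 *
 *	trace_sched_update_history(rq, p, runtime, samples, evt);
 */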
TRACE_EVENT(sched_update_history,

	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
		 enum task_event evt),

	TP_ARGS(rq, p, runtime, samples, evt),

	TP_STRUCT__entry(
		__array(char, comm, TASK_COMM_LEN)
		__field(pid_t, pid)
		__field(unsigned int, runtime)
		__field(int, samples)
		__field(enum task_event, evt)
		__field(unsigned int, demand)
		__array(u32, hist, RAVG_HIST_SIZE_MAX)
		__field(int, cpu)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->runtime = runtime;
		__entry->samples = samples;
		__entry->evt = evt;
		__entry->demand = p->ravg.demand;
		memcpy(__entry->hist, p->ravg.sum_history,
		       RAVG_HIST_SIZE_MAX * sizeof(u32));
		__entry->cpu = rq->cpu;
	),

	TP_printk("%d (%s): runtime %u samples %d event %s demand %u (hist: %u %u %u %u %u) cpu %d",
		  __entry->pid, __entry->comm,
		  __entry->runtime, __entry->samples,
		  task_event_names[__entry->evt], __entry->demand,
		  __entry->hist[0], __entry->hist[1],
		  __entry->hist[2], __entry->hist[3],
		  __entry->hist[4], __entry->cpu)
);

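/*
 * sched_update_task_ravg: dumps a task's and its rq's window-based
 * statistics (demand, window sums, per-CPU window contributions) on a
 * WALT accounting event; typically emitted whenever the task's
 * mark_start is advanced, e.g. on ticks, switches and migrations.
 */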
TRACE_EVENT(sched_update_task_ravg,

	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
		 u64 wallclock, u64 irqtime),

	TP_ARGS(p, rq, evt, wallclock, irqtime),

	TP_STRUCT__entry(
		__array(char, comm, TASK_COMM_LEN)
		__field(pid_t, pid)
		__field(pid_t, cur_pid)
		__field(unsigned int, cur_freq)
		__field(u64, wallclock)
		__field(u64, mark_start)
		__field(u64, delta_m)
		__field(u64, win_start)
		__field(u64, delta)
		__field(u64, irqtime)
		__field(enum task_event, evt)
		__field(unsigned int, demand)
		__field(unsigned int, sum)
		__field(int, cpu)
		__field(u64, rq_cs)
		__field(u64, rq_ps)
		__field(u32, curr_window)
		__field(u32, prev_window)
		__dynamic_array(u32, curr_sum, nr_cpu_ids)
		__dynamic_array(u32, prev_sum, nr_cpu_ids)
		__field(u64, nt_cs)
		__field(u64, nt_ps)
		__field(u32, active_windows)
	),

	TP_fast_assign(
		__entry->wallclock = wallclock;
		__entry->win_start = rq->window_start;
		__entry->delta = (wallclock - rq->window_start);
		__entry->evt = evt;
		__entry->cpu = rq->cpu;
		__entry->cur_pid = rq->curr->pid;
		__entry->cur_freq = rq->cluster->cur_freq;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->mark_start = p->ravg.mark_start;
		__entry->delta_m = (wallclock - p->ravg.mark_start);
		__entry->demand = p->ravg.demand;
		__entry->sum = p->ravg.sum;
		__entry->irqtime = irqtime;
		__entry->rq_cs = rq->curr_runnable_sum;
		__entry->rq_ps = rq->prev_runnable_sum;
		__entry->curr_window = p->ravg.curr_window;
		__entry->prev_window = p->ravg.prev_window;
		__window_data(__get_dynamic_array(curr_sum), p->ravg.curr_window_cpu);
		__window_data(__get_dynamic_array(prev_sum), p->ravg.prev_window_cpu);
		__entry->nt_cs = rq->nt_curr_runnable_sum;
		__entry->nt_ps = rq->nt_prev_runnable_sum;
		__entry->active_windows = p->ravg.active_windows;
	),

	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u",
		  __entry->wallclock, __entry->win_start, __entry->delta,
		  task_event_names[__entry->evt], __entry->cpu,
		  __entry->cur_freq, __entry->cur_pid,
		  __entry->pid, __entry->comm, __entry->mark_start,
		  __entry->delta_m, __entry->demand,
		  __entry->sum, __entry->irqtime,
		  __entry->rq_cs, __entry->rq_ps, __entry->curr_window,
		  __window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids),
		  __entry->prev_window,
		  __window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids),
		  __entry->nt_cs, __entry->nt_ps,
		  __entry->active_windows)
);

extern const char *migrate_type_names[];

#ifdef CONFIG_SCHED_RTG
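/*
 * sched_migration_update_sum: captures both the rq-side and the
 * related-thread-group-side runnable sums (current/previous window,
 * all-task and new-task variants) when a task's window contribution
 * moves between an rq and its group.
 */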
TRACE_EVENT(sched_migration_update_sum,

	TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct rq *rq),

	TP_ARGS(p, migrate_type, rq),

	TP_STRUCT__entry(
		__field(int, tcpu)
		__field(int, pid)
		__field(enum migrate_types, migrate_type)
		__field(s64, src_cs)
		__field(s64, src_ps)
		__field(s64, dst_cs)
		__field(s64, dst_ps)
		__field(s64, src_nt_cs)
		__field(s64, src_nt_ps)
		__field(s64, dst_nt_cs)
		__field(s64, dst_nt_ps)
	),

	TP_fast_assign(
		__entry->tcpu = task_cpu(p);
		__entry->pid = p->pid;
		__entry->migrate_type = migrate_type;
		__entry->src_cs = __get_update_sum(rq, migrate_type,
						   true, false, true);
		__entry->src_ps = __get_update_sum(rq, migrate_type,
						   true, false, false);
		__entry->dst_cs = __get_update_sum(rq, migrate_type,
						   false, false, true);
		__entry->dst_ps = __get_update_sum(rq, migrate_type,
						   false, false, false);
		__entry->src_nt_cs = __get_update_sum(rq, migrate_type,
						      true, true, true);
		__entry->src_nt_ps = __get_update_sum(rq, migrate_type,
						      true, true, false);
		__entry->dst_nt_cs = __get_update_sum(rq, migrate_type,
						      false, true, true);
		__entry->dst_nt_ps = __get_update_sum(rq, migrate_type,
						      false, true, false);
	),

	TP_printk("pid %d task_cpu %d migrate_type %s src_cs %lld src_ps %lld dst_cs %lld dst_ps %lld src_nt_cs %lld src_nt_ps %lld dst_nt_cs %lld dst_nt_ps %lld",
		  __entry->pid, __entry->tcpu, migrate_type_names[__entry->migrate_type],
		  __entry->src_cs, __entry->src_ps, __entry->dst_cs, __entry->dst_ps,
		  __entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
);
#endif /* CONFIG_SCHED_RTG */
#endif /* _TRACE_WALT_H */

/* This part must be outside protection */
#include <trace/define_trace.h>