// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
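
/*
 * For example, SPLIT_NS(1234567890) evaluates to the pair (1234, 567890),
 * so a "%Ld.%06ld" format prints "1234.567890" -- i.e. nanosecond values
 * are rendered as milliseconds with six fractional digits.
 */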

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
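
/*
 * Each SCHED_FEAT(name, enabled) line in "features.h" stringifies into one
 * array slot above; e.g. SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) becomes
 * "GENTLE_FAIR_SLEEPERS". The slot index matches the __SCHED_FEAT_* enum
 * that sched.h generates from the same header.
 */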

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}
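
/*
 * Illustrative output: enabled features print bare, disabled ones carry a
 * "NO_" prefix, e.g.:
 *
 *   GENTLE_FAIR_SLEEPERS START_DEBIT NO_NEXT_BUDDY LAST_BUDDY ...
 */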

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

static struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
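
/*
 * E.g. sched_feat_set("NO_NEXT_BUDDY") clears the NEXT_BUDDY bit and
 * disables its static key, while sched_feat_set("NEXT_BUDDY") re-enables
 * both. Unknown names propagate the -EINVAL from match_string().
 */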

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
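
/*
 * Typical usage from the shell, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *   cat /sys/kernel/debug/sched_features
 */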

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);
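
/*
 * Both files are created at the debugfs root (the NULL parent above):
 * "sched_features" drives the feature toggles, while the "sched_debug"
 * bool gates more verbose scheduler debug output consumed elsewhere in
 * kernel/sched/.
 */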

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}
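
/*
 * kcalloc() zero-fills the array, so as long as callers populate at most
 * n - 1 slots the final zero-mode entry acts as the ctl_table terminator.
 */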

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}

static int sd_ctl_doflags(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long flags = *(unsigned long *)table->data;
	size_t data_size = 0;
	size_t len = 0;
	char *tmp, *buf;
	int idx;

	if (write)
		return 0;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		/* Name plus whitespace */
		data_size += strlen(name) + 1;
	}

	if (*ppos > data_size) {
		*lenp = 0;
		return 0;
	}

	buf = kcalloc(data_size + 1, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		len += snprintf(buf + len, strlen(name) + 2, "%s ", name);
	}

	tmp = buf + *ppos;
	len -= *ppos;

	if (len > *lenp)
		len = *lenp;
	if (len)
		memcpy(buffer, tmp, len);
	if (len < *lenp) {
		((char *)buffer)[len] = '\n';
		len++;
	}

	*lenp = len;
	*ppos += len;

	kfree(buf);

	return 0;
}
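
/*
 * A read returns the set flags by name, one token per bit, e.g.:
 *
 *   SD_BALANCE_NEWIDLE SD_BALANCE_EXEC SD_BALANCE_FORK SD_WAKE_AFFINE
 *
 * (the exact set depends on the topology level). The early return on
 * write makes the handler a no-op even if the 0444 mode were relaxed.
 */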

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0444, sd_ctl_doflags);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
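
/*
 * Together with sd_alloc_ctl_domain_table() this yields the hierarchy
 *
 *   /proc/sys/kernel/sched_domain/cpu<N>/domain<M>/{min_interval,...}
 *
 * with one domain<M> directory per level of CPU N's sched-domain tree.
 */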

static cpumask_var_t		sd_sysctl_cpus;
static struct ctl_table_header	*sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
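
/*
 * Note the incremental design: only CPUs marked dirty in sd_sysctl_cpus
 * get their domain<M> subtree freed and rebuilt, so a hotplug event on
 * one CPU does not force regenerating every other CPU's tables.
 */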

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}
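
/*
 * Emits one line per task, matching the column header printed by
 * print_rq() below; illustrative (exact spacing per the format strings
 * above):
 *
 *  S       kworker/0:1    42     11609.964899       162   120 ...
 */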

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}
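
/*
 * Abridged, illustrative header output (values are examples only):
 *
 *   Sched Debug Version: v0.11, 5.10.0 #1
 *   ktime                                   : 105481234.567890
 *   ...
 *   sysctl_sched
 *     .sysctl_sched_latency                 : 24.000000
 *     .sysctl_sched_tunable_scaling         : 1 (logarithmic)
 */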

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
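
/*
 * Worked example with four online CPUs: *offset == 0 returns (void *)1
 * (the header), *offset == 1 maps to CPU 0 (returned as (void *)2), up
 * to *offset == 4 for CPU 3; *offset == 5 runs past cpu_online_mask and
 * returns NULL, ending the sequence. sched_debug_show() undoes the bias
 * via "(unsigned long)(v - 2)".
 */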

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
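
/*
 * The file is read with e.g. "cat /proc/sched_debug"; the seq iterator
 * above emits the header block first, then one section per online CPU.
 */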

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define P(F) __PS(#F, p->F)
#define PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}
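
	/*
	 * The two back-to-back cpu_clock() reads above make "clock-delta"
	 * a rough estimate of the clock-query overhead itself, which helps
	 * judge the resolution of the timestamps in this report.
	 */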

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}