// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
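
/*
 * Illustrative use (not part of the original file): the same call site can
 * feed both the /proc/sched_debug seq_file path and the sysrq/console path,
 * e.g.
 *
 *	SEQ_printf(m, "cpu#%d\n", cpu);
 *
 * With a valid seq_file the line is buffered via seq_printf(); when m is
 * NULL (as in sysrq_sched_debug_show() below) it goes out via pr_cont().
 */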

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
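
/*
 * Worked example (illustrative, not in the original source): for
 * nsec == 3141592653ULL, nsec_high() returns 3141 and nsec_low() returns
 * 592653, so
 *
 *	SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(3141592653ULL));
 *
 * prints "3141.592653", i.e. the value in milliseconds with six digits of
 * sub-millisecond precision.
 */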

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
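
/*
 * Sketch of the X-macro expansion above (assumes a features.h entry such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true); the exact list lives in
 * features.h). With the temporary SCHED_FEAT() definition above, including
 * features.h expands to:
 *
 *	static const char * const sched_feat_names[] = {
 *		"GENTLE_FAIR_SLEEPERS",
 *		...
 *	};
 *
 * The same header is re-included further down with a different SCHED_FEAT()
 * definition to build the matching static_key array.
 */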

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
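
/*
 * Usage sketch (illustrative, feature name assumed from features.h): writing
 * "GENTLE_FAIR_SLEEPERS" to the debugfs file below arrives here as
 * cmp == "GENTLE_FAIR_SLEEPERS" and enables the feature, while
 * "NO_GENTLE_FAIR_SLEEPERS" has the "NO_" prefix stripped, matches the same
 * name and disables it. An unknown name makes match_string() return
 * -EINVAL, which is propagated back to the writer.
 */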

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);
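
/*
 * From userspace (illustrative; assumes debugfs is mounted at the usual
 * /sys/kernel/debug) the two files created above are typically used as:
 *
 *	cat /sys/kernel/debug/sched_features
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *	echo 1 > /sys/kernel/debug/sched_debug
 *
 * The NULL parent passed to debugfs_create_file()/debugfs_create_bool()
 * places both files in the debugfs root.
 */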

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}

static int sd_ctl_doflags(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long flags = *(unsigned long *)table->data;
	size_t data_size = 0;
	size_t len = 0;
	char *tmp, *buf;
	int idx;

	if (write)
		return 0;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		/* Name plus whitespace */
		data_size += strlen(name) + 1;
	}

	if (*ppos > data_size) {
		*lenp = 0;
		return 0;
	}

	buf = kcalloc(data_size + 1, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		len += snprintf(buf + len, strlen(name) + 2, "%s ", name);
	}

	tmp = buf + *ppos;
	len -= *ppos;

	if (len > *lenp)
		len = *lenp;
	if (len)
		memcpy(buffer, tmp, len);
	if (len < *lenp) {
		((char *)buffer)[len] = '\n';
		len++;
	}

	*lenp = len;
	*ppos += len;

	kfree(buf);

	return 0;
}
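
/*
 * Worked example (illustrative, flag names assumed from sd_flags.h): if a
 * domain has SD_BALANCE_NEWIDLE and SD_BALANCE_EXEC set, the handler builds
 * "SD_BALANCE_NEWIDLE SD_BALANCE_EXEC " in buf and then copies the slice
 * starting at *ppos, so a read(2) of the sched_domain "flags" file returns
 * the human-readable names followed by a newline instead of the raw bitmask.
 */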

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0444, sd_ctl_doflags);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}
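
/*
 * Resulting layout (illustrative): the nine-entry allocation above yields a
 * per-domain directory such as
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/{min_interval, max_interval,
 *		busy_factor, imbalance_pct, cache_nice_tries, flags,
 *		max_newidle_lb_cost, name}
 *
 * with table[8] left zeroed by kcalloc() as the required sentinel entry.
 */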

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif
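
/*
 * Illustrative behaviour (not in the original file): the lock-holding caller
 * prints the full cgroup path, e.g. "cfs_rq[2]:/system.slice/foo.service".
 * A concurrent caller that loses the trylock falls back to the 128-byte
 * stack buffer; if the path fills that buffer the printed name ends in
 * "..." to flag the truncation.
 */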

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S task PID tree-key switches prio"
		   " wait-time sum-exec sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, " .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, " .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#ifdef CONFIG_SCHED_WALT
	P(cluster->load_scale_factor);
	P(cluster->capacity);
	P(cluster->max_possible_capacity);
	P(cluster->efficiency);
	P(cluster->cur_freq);
	P(cluster->max_freq);
	P(cluster->exec_scale_factor);
	SEQ_printf(m, " .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
			rq->walt_stats.cumulative_runnable_avg_scaled);
#endif
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#ifdef CONFIG_SCHED_WALT
	P(sched_init_task_load_windows);
	P(min_capacity);
	P(max_capacity);
	P(sched_ravg_window);
#endif
#undef PN
#undef P

	SEQ_printf(m, " .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
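
/*
 * Worked example (illustrative): with CPUs 0-3 online, *offset == 0 yields
 * the header cookie (void *)1; *offset == 1 maps to CPU 0 and is returned
 * as (void *)2; *offset == 3 maps to CPU 2 and is returned as (void *)4.
 * sched_debug_show() undoes the bias with "cpu = (unsigned long)(v - 2)",
 * so the header cookie becomes cpu == -1. If a CPU is offline,
 * cpumask_next() skips ahead to the next online one and *offset is advanced
 * to match.
 */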

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define P(F) __PS(#F, p->F)
#define PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define PN(F) __PSN(#F, p->F)
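
/*
 * Expansion sketch (illustrative): inside proc_sched_show_task() below,
 * P(se.nr_migrations) expands to
 *
 *	SEQ_printf(m, "%-45s:%21Ld\n", "se.nr_migrations",
 *		   (long long)(p->se.nr_migrations));
 *
 * i.e. the field name is stringified as the label and the task_struct
 * member is printed right-aligned; the PN()/PN_SCHEDSTAT() variants route
 * the value through SPLIT_NS() so nanosecond fields are shown as
 * milliseconds with a six-digit fractional part.
 */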


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
		   task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
			  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
		   get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F) __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

#ifdef CONFIG_SCHED_WALT
		P(ravg.demand);
#endif

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
#ifdef CONFIG_SCHED_LATENCY_NICE
	P(latency_prio);
#endif
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}