// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)		\
do {					\
	if (m)				\
		seq_printf(m, x);	\
	else				\
		pr_cont(x);		\
} while (0)
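
/*
 * Illustrative example: SEQ_printf(m, "cpu#%d\n", cpu) goes through
 * seq_printf() when called during a /proc or debugfs read, while the
 * same call with m == NULL (as in sysrq_sched_debug_show() below)
 * falls back to pr_cont() on the console.
 */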

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
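
/*
 * Worked example: for nsec = 1234567890, nsec_high() returns 1234 and
 * nsec_low() returns 567890, so a "%Ld.%06ld" format fed with
 * SPLIT_NS(nsec) prints "1234.567890", i.e. milliseconds with six
 * fractional digits.
 */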

#define SCHED_FEAT(name, enabled)	\
	#name ,

const char * const sched_feat_names[] = {
#include "features.h"
};
EXPORT_SYMBOL_GPL(sched_feat_names);

#undef SCHED_FEAT
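
/*
 * The above is the classic x-macro pattern: features.h is a list of
 * SCHED_FEAT(NAME, enabled) entries, so including it under this
 * definition stringifies each entry's name (e.g.
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) contributes
 * "GENTLE_FAIR_SLEEPERS") to build the sched_feat_names[] table.
 */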

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};
EXPORT_SYMBOL_GPL(sched_feat_keys);

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
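
/*
 * Typical usage from userspace (illustrative):
 *
 *   # cat /sys/kernel/debug/sched/features
 *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 *
 * Writing "NAME" sets the feature bit, "NO_NAME" clears it; see
 * sched_feat_set() above. The "features" file is created in
 * sched_init_debug() below.
 */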

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
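
/*
 * The accepted values map onto sched_tunable_scaling_names[] further
 * down: 0 = none, 1 = logarithmic, 2 = linear. Illustrative use:
 *
 *   # echo 1 > /sys/kernel/debug/sched/tunable_scaling
 */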

#endif /* SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
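
/*
 * Reading the file brackets the active mode, e.g. "none (voluntary) full";
 * writing one of the listed names switches the dynamic preemption model
 * (illustrative):
 *
 *   # echo full > /sys/kernel/debug/sched/preempt
 */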

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

static cpumask_var_t sd_sysctl_cpus;
static struct dentry *sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)
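	/*
	 * Expansion example: SDM(u32, 0644, busy_factor) becomes
	 * debugfs_create_u32("busy_factor", 0644, parent, &sd->busy_factor),
	 * via ## token pasting on the type and # stringification of the
	 * member name.
	 */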

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only one SEQ_printf_task_group_path() caller at a time can use the
 * full-length group_path[] buffer for the cgroup path; any simultaneous
 * caller falls back to a shorter stack buffer. A "..." suffix is placed
 * at the end of that stack buffer so that it shows up whenever the
 * output fills the buffer, flagging a possibly truncated path name.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		   p->comm, task_pid_nr(p),
		   SPLIT_NS(p->se.vruntime),
		   (long long)(p->nvcsw + p->nivcsw),
		   p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		   SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		   SPLIT_NS(p->se.sum_exec_runtime),
		   SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S task PID tree-key switches prio"
		   " wait-time sum-exec sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
		   SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
		   SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
		   SPLIT_NS(min_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
		   SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
		   SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
		   SPLIT_NS(spread0));
	SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
		   cfs_rq->nr_spread_over);
	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",
		   cfs_rq->idle_h_nr_running);
	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
		   cfs_rq->avg.load_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "runnable_avg",
		   cfs_rq->avg.runnable_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
		   cfs_rq->avg.util_avg);
	SEQ_printf(m, " .%-30s: %u\n", "util_est_enqueued",
		   cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",
		   cfs_rq->removed.load_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",
		   cfs_rq->removed.util_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.runnable_avg",
		   cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
		   cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
		   atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, " .%-30s: %d\n", "throttled",
		   cfs_rq->throttled);
	SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
		   cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)
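
/*
 * The sizeof() dispatch in P() keeps the printf conversion in step with
 * the member's width, so 4-byte rq fields are not passed to a 64-bit
 * %Ld conversion (and vice versa).
 */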

#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		   init_utsname()->release,
		   (int)strcspn(init_utsname()->version, " "),
		   init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, " .%-40s: %d (%s)\n",
		   "sysctl_sched_tunable_scaling",
		   sysctl_sched_tunable_scaling,
		   sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation:
 * it returns 1 for the header position, which means CPU n is encoded
 * as n + 2. In a hotplugged system some CPUs, including CPU 0, may be
 * missing, so we have to use cpumask_*() to iterate over the CPUs.
 */
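/*
 * Decoding example: sched_debug_show() computes cpu = v - 2, so the
 * header cookie (void *)1 decodes to cpu == -1 (print the header) and
 * (void *)(n + 2) decodes to CPU n.
 */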
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F)	SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F)		__PS(#F, F)
#define P(F)		__PS(#F, p->F)
#define PM(F, M)	__PS(#F, p->F & (M))
#define __PSN(S, F)	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F)		__PSN(#F, F)
#define PN(F)		__PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		      unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
		   task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
			  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
		   get_nr_threads(p));
	SEQ_printf(m,
		   "---------------------------------------------------------"
		   "----------\n");

#define P_SCHEDSTAT(F)	__PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F)	__PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

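	/*
	 * Two back-to-back reads of the local CPU clock; their delta gives
	 * a rough view of sched_clock() granularity and overhead on this
	 * CPU.
	 */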
	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);
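	/* Interval 60*60*HZ, burst 1: warn at most once per hour. */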

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}
