// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
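
/*
 * Example (illustrative): SEQ_printf(NULL, "cpu#%d\n", cpu) falls through
 * to pr_cont() and prints to the console; the sysrq path further below
 * reuses every seq_file printing helper this way by passing m == NULL.
 */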

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
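
/*
 * Worked example (illustrative): for x = 1234567890 ns, SPLIT_NS(x) yields
 * nsec_high() = 1234 and nsec_low() = 567890, so a "%Ld.%06ld" format
 * prints "1234.567890" -- the value in milliseconds with the remaining
 * nanoseconds as six decimal places.
 */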

#define SCHED_FEAT(name, enabled)	\
	#name ,

const char * const sched_feat_names[] = {
#include "features.h"
};
EXPORT_SYMBOL_GPL(sched_feat_names);

#undef SCHED_FEAT
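
/*
 * Sketch of the X-macro trick above (the features.h entry shown is an
 * assumption for illustration): an entry such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) expands here to the string
 * literal "GENTLE_FAIR_SLEEPERS", so a single #include builds the whole
 * name table in declaration order.
 */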

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};
EXPORT_SYMBOL_GPL(sched_feat_keys);

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
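
/*
 * Usage sketch (illustrative, assuming debugfs is mounted at the usual
 * /sys/kernel/debug): reading "features" lists every feature, prefixed
 * with "NO_" when clear; writing a name toggles it, e.g.
 *
 *	echo NO_<feature> > /sys/kernel/debug/sched/features
 *
 * clears <feature> via sched_feat_set() above.
 */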

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}
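
/*
 * Sample output (illustrative): with preempt_dynamic_mode selecting "full",
 * the loop above produces "none voluntary (full) " followed by a newline,
 * parentheses marking the currently active preemption mode.
 */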

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);
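
/*
 * Resulting layout (illustrative, assuming debugfs mounted at
 * /sys/kernel/debug): a "sched" directory holding the tunables above,
 * e.g. /sys/kernel/debug/sched/latency_ns, with per-domain knobs under
 * sched/domains/cpuN/domainM/ and NUMA-balancing knobs under
 * sched/numa_balancing/.
 */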

#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;
static struct dentry		*sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}
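
/*
 * Expansion sketch (illustrative): SDM(u32, 0644, busy_factor) pastes the
 * type and stringizes the member, becoming
 * debugfs_create_u32("busy_factor", 0644, parent, &sd->busy_factor),
 * so each line above publishes one sched_domain field as a debugfs file.
 */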

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
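
/*
 * Protocol note (illustrative summary): topology changes call
 * dirty_sched_domain_sysctl() to mark a CPU in sd_sysctl_cpus, and the
 * next update_sched_domain_debugfs() pass regenerates only the marked
 * CPUs' domain directories, then clears their bits.
 */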

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only one SEQ_printf_task_group_path() caller at a time can use the
 * full-length group_path[] buffer for the cgroup path; any simultaneous
 * callers must fall back to a shorter stack buffer. A "..." suffix is
 * appended to the stack buffer so that it shows up whenever the output
 * fills the buffer, indicating possible path-name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
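
/*
 * Design note (one reading of the trylock above): spin_trylock() rather
 * than spin_lock() keeps concurrent dumpers from blocking on each other;
 * a loser simply prints a possibly truncated path from its 128-byte stack
 * buffer instead of waiting for the shared group_path[].
 */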
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
			cfs_rq->idle_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_idle_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
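
/*
 * Worked mapping (illustrative): *offset 0 returns the header cookie 1;
 * *offset 1 returns cpumask_first() + 2. In sched_debug_show() the cookie
 * is decoded back with (v - 2), so 1 becomes -1 (print the header) and
 * n + 2 becomes CPU n.
 */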

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)

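/*
 * Expansion sketch (illustrative): P(se.nr_migrations) becomes
 * __PS("se.nr_migrations", p->se.nr_migrations), i.e. the macro stringizes
 * the member path for the label and reads that same member off *p.
 */
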
#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}
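
/*
 * Rate-limit note (illustrative): DEFINE_RATELIMIT_STATE(..., 60 * 60 * HZ, 1)
 * permits a burst of one warning per one-hour interval, so a persistently
 * stuck need_resched produces at most one splat per hour rather than one
 * per scheduler tick.
 */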