1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Scheduler topology setup/handling methods
4  */
5 
6 #include <linux/bsearch.h>
7 #include <trace/hooks/sched.h>
8 
9 DEFINE_MUTEX(sched_domains_mutex);
10 #ifdef CONFIG_LOCKDEP
11 EXPORT_SYMBOL_GPL(sched_domains_mutex);
12 #endif
13 
14 /* Protected by sched_domains_mutex: */
15 static cpumask_var_t sched_domains_tmpmask;
16 static cpumask_var_t sched_domains_tmpmask2;
17 
18 #ifdef CONFIG_SCHED_DEBUG
19 
20 static int __init sched_debug_setup(char *str)
21 {
22 	sched_debug_verbose = true;
23 
24 	return 0;
25 }
26 early_param("sched_verbose", sched_debug_setup);
27 
28 static inline bool sched_debug(void)
29 {
30 	return sched_debug_verbose;
31 }
32 
33 #define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
34 const struct sd_flag_debug sd_flag_debug[] = {
35 #include <linux/sched/sd_flags.h>
36 };
37 #undef SD_FLAG
38 
39 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
40 				  struct cpumask *groupmask)
41 {
42 	struct sched_group *group = sd->groups;
43 	unsigned long flags = sd->flags;
44 	unsigned int idx;
45 
46 	cpumask_clear(groupmask);
47 
48 	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
49 	printk(KERN_CONT "span=%*pbl level=%s\n",
50 	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
51 
52 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
53 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
54 	}
55 	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
56 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
57 	}
58 
59 	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
60 		unsigned int flag = BIT(idx);
61 		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;
62 
63 		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
64 		    !(sd->child->flags & flag))
65 			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
66 			       sd_flag_debug[idx].name);
67 
68 		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
69 		    !(sd->parent->flags & flag))
70 			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
71 			       sd_flag_debug[idx].name);
72 	}
73 
74 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
75 	do {
76 		if (!group) {
77 			printk("\n");
78 			printk(KERN_ERR "ERROR: group is NULL\n");
79 			break;
80 		}
81 
82 		if (cpumask_empty(sched_group_span(group))) {
83 			printk(KERN_CONT "\n");
84 			printk(KERN_ERR "ERROR: empty group\n");
85 			break;
86 		}
87 
88 		if (!(sd->flags & SD_OVERLAP) &&
89 		    cpumask_intersects(groupmask, sched_group_span(group))) {
90 			printk(KERN_CONT "\n");
91 			printk(KERN_ERR "ERROR: repeated CPUs\n");
92 			break;
93 		}
94 
95 		cpumask_or(groupmask, groupmask, sched_group_span(group));
96 
97 		printk(KERN_CONT " %d:{ span=%*pbl",
98 				group->sgc->id,
99 				cpumask_pr_args(sched_group_span(group)));
100 
101 		if ((sd->flags & SD_OVERLAP) &&
102 		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
103 			printk(KERN_CONT " mask=%*pbl",
104 				cpumask_pr_args(group_balance_mask(group)));
105 		}
106 
107 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
108 			printk(KERN_CONT " cap=%lu", group->sgc->capacity);
109 
110 		if (group == sd->groups && sd->child &&
111 		    !cpumask_equal(sched_domain_span(sd->child),
112 				   sched_group_span(group))) {
113 			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
114 		}
115 
116 		printk(KERN_CONT " }");
117 
118 		group = group->next;
119 
120 		if (group != sd->groups)
121 			printk(KERN_CONT ",");
122 
123 	} while (group != sd->groups);
124 	printk(KERN_CONT "\n");
125 
126 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
127 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
128 
129 	if (sd->parent &&
130 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
131 		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
132 	return 0;
133 }
134 
135 static void sched_domain_debug(struct sched_domain *sd, int cpu)
136 {
137 	int level = 0;
138 
139 	if (!sched_debug_verbose)
140 		return;
141 
142 	if (!sd) {
143 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
144 		return;
145 	}
146 
147 	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
148 
149 	for (;;) {
150 		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
151 			break;
152 		level++;
153 		sd = sd->parent;
154 		if (!sd)
155 			break;
156 	}
157 }
158 #else /* !CONFIG_SCHED_DEBUG */
159 
160 # define sched_debug_verbose 0
161 # define sched_domain_debug(sd, cpu) do { } while (0)
162 static inline bool sched_debug(void)
163 {
164 	return false;
165 }
166 #endif /* CONFIG_SCHED_DEBUG */
167 
168 /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
169 #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
170 static const unsigned int SD_DEGENERATE_GROUPS_MASK =
171 #include <linux/sched/sd_flags.h>
172 0;
173 #undef SD_FLAG
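/*
 * Illustration of the construct above: each SD_FLAG() entry in
 * <linux/sched/sd_flags.h> expands to either the flag's value or 0, all
 * OR-ed together. Assuming, for example, that SD_BALANCE_NEWIDLE is
 * declared with SDF_NEEDS_GROUPS and SD_WAKE_AFFINE is not, the expansion
 * looks roughly like:
 *
 *	static const unsigned int SD_DEGENERATE_GROUPS_MASK =
 *		(SD_BALANCE_NEWIDLE * 1) |
 *		(SD_WAKE_AFFINE     * 0) |
 *		...
 *		0;
 */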
174 
175 static int sd_degenerate(struct sched_domain *sd)
176 {
177 	if (cpumask_weight(sched_domain_span(sd)) == 1)
178 		return 1;
179 
180 	/* Following flags need at least 2 groups */
181 	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
182 	    (sd->groups != sd->groups->next))
183 		return 0;
184 
185 	/* Following flags don't use groups */
186 	if (sd->flags & (SD_WAKE_AFFINE))
187 		return 0;
188 
189 	return 1;
190 }
191 
192 static int
193 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
194 {
195 	unsigned long cflags = sd->flags, pflags = parent->flags;
196 
197 	if (sd_degenerate(parent))
198 		return 1;
199 
200 	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
201 		return 0;
202 
203 	/* Flags needing groups don't count if only 1 group in parent */
204 	if (parent->groups == parent->groups->next)
205 		pflags &= ~SD_DEGENERATE_GROUPS_MASK;
206 
207 	if (~cflags & pflags)
208 		return 0;
209 
210 	return 1;
211 }
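/*
 * Worked example (illustrative): suppose an MC domain and its DIE parent
 * span exactly the same CPUs and the parent has a single group. The
 * group-needing flags are then cleared from pflags, and if the parent sets
 * no flag that the child lacks (~cflags & pflags == 0), the parent is
 * degenerate and cpu_attach_domain() below splices it out of the hierarchy.
 */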
212 
213 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
214 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
215 static unsigned int sysctl_sched_energy_aware = 1;
216 static DEFINE_MUTEX(sched_energy_mutex);
217 static bool sched_energy_update;
218 
219 void rebuild_sched_domains_energy(void)
220 {
221 	mutex_lock(&sched_energy_mutex);
222 	sched_energy_update = true;
223 	rebuild_sched_domains();
224 	sched_energy_update = false;
225 	mutex_unlock(&sched_energy_mutex);
226 }
227 
228 #ifdef CONFIG_PROC_SYSCTL
229 static int sched_energy_aware_handler(struct ctl_table *table, int write,
230 		void *buffer, size_t *lenp, loff_t *ppos)
231 {
232 	int ret, state;
233 
234 	if (write && !capable(CAP_SYS_ADMIN))
235 		return -EPERM;
236 
237 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
238 	if (!ret && write) {
239 		state = static_branch_unlikely(&sched_energy_present);
240 		if (state != sysctl_sched_energy_aware)
241 			rebuild_sched_domains_energy();
242 	}
243 
244 	return ret;
245 }
246 
247 static struct ctl_table sched_energy_aware_sysctls[] = {
248 	{
249 		.procname       = "sched_energy_aware",
250 		.data           = &sysctl_sched_energy_aware,
251 		.maxlen         = sizeof(unsigned int),
252 		.mode           = 0644,
253 		.proc_handler   = sched_energy_aware_handler,
254 		.extra1         = SYSCTL_ZERO,
255 		.extra2         = SYSCTL_ONE,
256 	},
257 	{}
258 };
259 
260 static int __init sched_energy_aware_sysctl_init(void)
261 {
262 	register_sysctl_init("kernel", sched_energy_aware_sysctls);
263 	return 0;
264 }
265 
266 late_initcall(sched_energy_aware_sysctl_init);
267 #endif
268 
269 static void free_pd(struct perf_domain *pd)
270 {
271 	struct perf_domain *tmp;
272 
273 	while (pd) {
274 		tmp = pd->next;
275 		kfree(pd);
276 		pd = tmp;
277 	}
278 }
279 
280 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
281 {
282 	while (pd) {
283 		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
284 			return pd;
285 		pd = pd->next;
286 	}
287 
288 	return NULL;
289 }
290 
291 static struct perf_domain *pd_init(int cpu)
292 {
293 	struct em_perf_domain *obj = em_cpu_get(cpu);
294 	struct perf_domain *pd;
295 
296 	if (!obj) {
297 		if (sched_debug())
298 			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
299 		return NULL;
300 	}
301 
302 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
303 	if (!pd)
304 		return NULL;
305 	pd->em_pd = obj;
306 
307 	return pd;
308 }
309 
310 static void perf_domain_debug(const struct cpumask *cpu_map,
311 						struct perf_domain *pd)
312 {
313 	if (!sched_debug() || !pd)
314 		return;
315 
316 	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
317 
318 	while (pd) {
319 		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
320 				cpumask_first(perf_domain_span(pd)),
321 				cpumask_pr_args(perf_domain_span(pd)),
322 				em_pd_nr_perf_states(pd->em_pd));
323 		pd = pd->next;
324 	}
325 
326 	printk(KERN_CONT "\n");
327 }
328 
329 static void destroy_perf_domain_rcu(struct rcu_head *rp)
330 {
331 	struct perf_domain *pd;
332 
333 	pd = container_of(rp, struct perf_domain, rcu);
334 	free_pd(pd);
335 }
336 
337 static void sched_energy_set(bool has_eas)
338 {
339 	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
340 		if (sched_debug())
341 			pr_info("%s: stopping EAS\n", __func__);
342 		static_branch_disable_cpuslocked(&sched_energy_present);
343 	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
344 		if (sched_debug())
345 			pr_info("%s: starting EAS\n", __func__);
346 		static_branch_enable_cpuslocked(&sched_energy_present);
347 	}
348 }
349 
350 /*
351  * EAS can be used on a root domain if it meets all the following conditions:
352  *    1. an Energy Model (EM) is available;
353  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
354  *    3. no SMT is detected;
355  *    4. the EM complexity is low enough to keep scheduling overheads low;
356  *    5. frequency invariance support is present.
357  *
358  * The complexity of the Energy Model is defined as:
359  *
360  *              C = nr_pd * (nr_cpus + nr_ps)
361  *
362  * with parameters defined as:
363  *  - nr_pd:    the number of performance domains
364  *  - nr_cpus:  the number of CPUs
365  *  - nr_ps:    the sum of the number of performance states of all performance
366  *              domains (for example, on a system with 2 performance domains,
367  *              with 10 performance states each, nr_ps = 2 * 10 = 20).
368  *
369  * It is generally not a good idea to use such a model in the wake-up path on
370  * very complex platforms because of the associated scheduling overheads. The
371  * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
372  * with per-CPU DVFS and less than 8 performance states each, for example.
373  */
374 #define EM_MAX_COMPLEXITY 2048
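/*
 * Worked example for the complexity bound above: per-CPU DVFS on 16 CPUs
 * gives nr_pd = nr_cpus = 16. With 8 performance states per domain,
 * nr_ps = 16 * 8 = 128 and
 *
 *	C = 16 * (16 + 128) = 2304 > 2048	-> EAS is not enabled.
 *
 * With 7 performance states per domain, nr_ps = 112 and
 *
 *	C = 16 * (16 + 112) = 2048		-> just within the limit.
 */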
375 
376 static bool build_perf_domains(const struct cpumask *cpu_map)
377 {
378 	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
379 	struct perf_domain *pd = NULL, *tmp;
380 	int cpu = cpumask_first(cpu_map);
381 	struct root_domain *rd = cpu_rq(cpu)->rd;
382 	bool eas_check = false;
383 
384 	if (!sysctl_sched_energy_aware)
385 		goto free;
386 
387 	/*
388 	 * EAS is enabled for asymmetric CPU capacity topologies.
389 	 * Allow vendor to override if desired.
390 	 */
391 	trace_android_rvh_build_perf_domains(&eas_check);
392 	if (!per_cpu(sd_asym_cpucapacity, cpu) && !eas_check) {
393 		if (sched_debug()) {
394 			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
395 					cpumask_pr_args(cpu_map));
396 		}
397 		goto free;
398 	}
399 
400 	/* EAS definitely does *not* handle SMT */
401 	if (sched_smt_active()) {
402 		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
403 			cpumask_pr_args(cpu_map));
404 		goto free;
405 	}
406 
407 	if (!arch_scale_freq_invariant()) {
408 		if (sched_debug()) {
409 			pr_warn("rd %*pbl: Disabling EAS: frequency-invariant load tracking not yet supported",
410 				cpumask_pr_args(cpu_map));
411 		}
412 		goto free;
413 	}
414 
415 	for_each_cpu(i, cpu_map) {
416 		/* Skip already covered CPUs. */
417 		if (find_pd(pd, i))
418 			continue;
419 
420 		/* Create the new pd and add it to the local list. */
421 		tmp = pd_init(i);
422 		if (!tmp)
423 			goto free;
424 		tmp->next = pd;
425 		pd = tmp;
426 
427 		/*
428 		 * Count performance domains and performance states for the
429 		 * complexity check.
430 		 */
431 		nr_pd++;
432 		nr_ps += em_pd_nr_perf_states(pd->em_pd);
433 	}
434 
435 	/* Bail out if the Energy Model complexity is too high. */
436 	if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
437 		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
438 						cpumask_pr_args(cpu_map));
439 		goto free;
440 	}
441 
442 	perf_domain_debug(cpu_map, pd);
443 
444 	/* Attach the new list of performance domains to the root domain. */
445 	tmp = rd->pd;
446 	rcu_assign_pointer(rd->pd, pd);
447 	if (tmp)
448 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
449 
450 	return !!pd;
451 
452 free:
453 	free_pd(pd);
454 	tmp = rd->pd;
455 	rcu_assign_pointer(rd->pd, NULL);
456 	if (tmp)
457 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
458 
459 	return false;
460 }
461 #else
462 static void free_pd(struct perf_domain *pd) { }
463 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/
464 
465 static void free_rootdomain(struct rcu_head *rcu)
466 {
467 	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
468 
469 	cpupri_cleanup(&rd->cpupri);
470 	cpudl_cleanup(&rd->cpudl);
471 	free_cpumask_var(rd->dlo_mask);
472 	free_cpumask_var(rd->rto_mask);
473 	free_cpumask_var(rd->online);
474 	free_cpumask_var(rd->span);
475 	free_pd(rd->pd);
476 	kfree(rd);
477 }
478 
479 void rq_attach_root(struct rq *rq, struct root_domain *rd)
480 {
481 	struct root_domain *old_rd = NULL;
482 	struct rq_flags rf;
483 
484 	rq_lock_irqsave(rq, &rf);
485 
486 	if (rq->rd) {
487 		old_rd = rq->rd;
488 
489 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
490 			set_rq_offline(rq);
491 
492 		cpumask_clear_cpu(rq->cpu, old_rd->span);
493 
494 		/*
495 		 * If we don't want to free the old_rd yet then
496 		 * set old_rd to NULL to skip the freeing later
497 		 * in this function:
498 		 */
499 		if (!atomic_dec_and_test(&old_rd->refcount))
500 			old_rd = NULL;
501 	}
502 
503 	atomic_inc(&rd->refcount);
504 	rq->rd = rd;
505 
506 	cpumask_set_cpu(rq->cpu, rd->span);
507 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
508 		set_rq_online(rq);
509 
510 	rq_unlock_irqrestore(rq, &rf);
511 
512 	if (old_rd)
513 		call_rcu(&old_rd->rcu, free_rootdomain);
514 }
515 
516 void sched_get_rd(struct root_domain *rd)
517 {
518 	atomic_inc(&rd->refcount);
519 }
520 
521 void sched_put_rd(struct root_domain *rd)
522 {
523 	if (!atomic_dec_and_test(&rd->refcount))
524 		return;
525 
526 	call_rcu(&rd->rcu, free_rootdomain);
527 }
528 
529 static int init_rootdomain(struct root_domain *rd)
530 {
531 	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
532 		goto out;
533 	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
534 		goto free_span;
535 	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
536 		goto free_online;
537 	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
538 		goto free_dlo_mask;
539 
540 #ifdef HAVE_RT_PUSH_IPI
541 	rd->rto_cpu = -1;
542 	raw_spin_lock_init(&rd->rto_lock);
543 	rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
544 #endif
545 
546 	rd->visit_gen = 0;
547 	init_dl_bw(&rd->dl_bw);
548 	if (cpudl_init(&rd->cpudl) != 0)
549 		goto free_rto_mask;
550 
551 	if (cpupri_init(&rd->cpupri) != 0)
552 		goto free_cpudl;
553 	return 0;
554 
555 free_cpudl:
556 	cpudl_cleanup(&rd->cpudl);
557 free_rto_mask:
558 	free_cpumask_var(rd->rto_mask);
559 free_dlo_mask:
560 	free_cpumask_var(rd->dlo_mask);
561 free_online:
562 	free_cpumask_var(rd->online);
563 free_span:
564 	free_cpumask_var(rd->span);
565 out:
566 	return -ENOMEM;
567 }
568 
569 /*
570  * By default the system creates a single root-domain with all CPUs as
571  * members (mimicking the global state we have today).
572  */
573 struct root_domain def_root_domain;
574 
575 void __init init_defrootdomain(void)
576 {
577 	init_rootdomain(&def_root_domain);
578 
579 	atomic_set(&def_root_domain.refcount, 1);
580 }
581 
582 static struct root_domain *alloc_rootdomain(void)
583 {
584 	struct root_domain *rd;
585 
586 	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
587 	if (!rd)
588 		return NULL;
589 
590 	if (init_rootdomain(rd) != 0) {
591 		kfree(rd);
592 		return NULL;
593 	}
594 
595 	return rd;
596 }
597 
598 static void free_sched_groups(struct sched_group *sg, int free_sgc)
599 {
600 	struct sched_group *tmp, *first;
601 
602 	if (!sg)
603 		return;
604 
605 	first = sg;
606 	do {
607 		tmp = sg->next;
608 
609 		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
610 			kfree(sg->sgc);
611 
612 		if (atomic_dec_and_test(&sg->ref))
613 			kfree(sg);
614 		sg = tmp;
615 	} while (sg != first);
616 }
617 
618 static void destroy_sched_domain(struct sched_domain *sd)
619 {
620 	/*
621 	 * A normal sched domain may have multiple group references; an
622 	 * overlapping domain, having private groups, has only one. Iterate,
623 	 * dropping group/capacity references and freeing where none remain.
624 	 */
625 	free_sched_groups(sd->groups, 1);
626 
627 	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
628 		kfree(sd->shared);
629 	kfree(sd);
630 }
631 
632 static void destroy_sched_domains_rcu(struct rcu_head *rcu)
633 {
634 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
635 
636 	while (sd) {
637 		struct sched_domain *parent = sd->parent;
638 		destroy_sched_domain(sd);
639 		sd = parent;
640 	}
641 }
642 
643 static void destroy_sched_domains(struct sched_domain *sd)
644 {
645 	if (sd)
646 		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
647 }
648 
649 /*
650  * Keep a special pointer to the highest sched_domain that has
651  * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
652  * allows us to avoid some pointer chasing in select_idle_sibling().
653  *
654  * Also keep a unique ID per domain (we use the first CPU number in
655  * the cpumask of the domain), this allows us to quickly tell if
656  * two CPUs are in the same cache domain, see cpus_share_cache().
657  */
658 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
659 DEFINE_PER_CPU(int, sd_llc_size);
660 DEFINE_PER_CPU(int, sd_llc_id);
661 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
662 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
663 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
664 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
665 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
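/*
 * As noted in the comment above, the per-CPU sd_llc_id lets
 * cpus_share_cache() compare two CPUs without walking the domain tree.
 * Sketch of that helper (the real implementation lives in core.c): two
 * CPUs share their cache domain iff they resolved to the same LLC id:
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		if (this_cpu == that_cpu)
 *			return true;
 *
 *		return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 *	}
 */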
666 
667 static void update_top_cache_domain(int cpu)
668 {
669 	struct sched_domain_shared *sds = NULL;
670 	struct sched_domain *sd;
671 	int id = cpu;
672 	int size = 1;
673 
674 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
675 	if (sd) {
676 		id = cpumask_first(sched_domain_span(sd));
677 		size = cpumask_weight(sched_domain_span(sd));
678 		sds = sd->shared;
679 	}
680 
681 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
682 	per_cpu(sd_llc_size, cpu) = size;
683 	per_cpu(sd_llc_id, cpu) = id;
684 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
685 
686 	sd = lowest_flag_domain(cpu, SD_NUMA);
687 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
688 
689 	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
690 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
691 
692 	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY_FULL);
693 	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
694 }
695 
696 /*
697  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
698  * hold the hotplug lock.
699  */
700 static void
701 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
702 {
703 	struct rq *rq = cpu_rq(cpu);
704 	struct sched_domain *tmp;
705 
706 	/* Remove the sched domains which do not contribute to scheduling. */
707 	for (tmp = sd; tmp; ) {
708 		struct sched_domain *parent = tmp->parent;
709 		if (!parent)
710 			break;
711 
712 		if (sd_parent_degenerate(tmp, parent)) {
713 			tmp->parent = parent->parent;
714 
715 			if (parent->parent) {
716 				parent->parent->child = tmp;
717 				parent->parent->groups->flags = tmp->flags;
718 			}
719 
720 			/*
721 			 * Transfer SD_PREFER_SIBLING down in case of a
722 			 * degenerate parent; the spans match for this
723 			 * so the property transfers.
724 			 */
725 			if (parent->flags & SD_PREFER_SIBLING)
726 				tmp->flags |= SD_PREFER_SIBLING;
727 			destroy_sched_domain(parent);
728 		} else
729 			tmp = tmp->parent;
730 	}
731 
732 	if (sd && sd_degenerate(sd)) {
733 		tmp = sd;
734 		sd = sd->parent;
735 		destroy_sched_domain(tmp);
736 		if (sd) {
737 			struct sched_group *sg = sd->groups;
738 
739 			/*
740 			 * sched groups hold the flags of the child sched
741 			 * domain for convenience. Clear such flags since
742 			 * the child is being destroyed.
743 			 */
744 			do {
745 				sg->flags = 0;
746 			} while (sg != sd->groups);
747 
748 			sd->child = NULL;
749 		}
750 	}
751 
752 	sched_domain_debug(sd, cpu);
753 
754 	rq_attach_root(rq, rd);
755 	tmp = rq->sd;
756 	rcu_assign_pointer(rq->sd, sd);
757 	dirty_sched_domain_sysctl(cpu);
758 	destroy_sched_domains(tmp);
759 
760 	update_top_cache_domain(cpu);
761 }
762 
763 struct s_data {
764 	struct sched_domain * __percpu *sd;
765 	struct root_domain	*rd;
766 };
767 
768 enum s_alloc {
769 	sa_rootdomain,
770 	sa_sd,
771 	sa_sd_storage,
772 	sa_none,
773 };
774 
775 /*
776  * Return the canonical balance CPU for this group, this is the first CPU
777  * of this group that's also in the balance mask.
778  *
779  * The balance mask is all those CPUs that could actually end up at this
780  * group. See build_balance_mask().
781  *
782  * Also see should_we_balance().
783  */
784 int group_balance_cpu(struct sched_group *sg)
785 {
786 	return cpumask_first(group_balance_mask(sg));
787 }
788 
789 
790 /*
791  * NUMA topology (first read the regular topology blurb below)
792  *
793  * Given a node-distance table, for example:
794  *
795  *   node   0   1   2   3
796  *     0:  10  20  30  20
797  *     1:  20  10  20  30
798  *     2:  30  20  10  20
799  *     3:  20  30  20  10
800  *
801  * which represents a 4 node ring topology like:
802  *
803  *   0 ----- 1
804  *   |       |
805  *   |       |
806  *   |       |
807  *   3 ----- 2
808  *
809  * We want to construct domains and groups to represent this. The way we go
810  * about doing this is to build the domains on 'hops'. For each NUMA level we
811  * construct the mask of all nodes reachable in @level hops.
812  *
813  * For the above NUMA topology that gives 3 levels:
814  *
815  * NUMA-2	0-3		0-3		0-3		0-3
816  *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
817  *
818  * NUMA-1	0-1,3		0-2		1-3		0,2-3
819  *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
820  *
821  * NUMA-0	0		1		2		3
822  *
823  *
824  * As can be seen, things don't nicely line up as with the regular topology.
825  * When we iterate a domain in child domain chunks, some nodes can be
826  * represented multiple times -- hence the "overlap" naming for this part of
827  * the topology.
828  *
829  * In order to minimize this overlap, we only build enough groups to cover the
830  * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
831  *
832  * Because:
833  *
834  *  - the first group of each domain is its child domain; this
835  *    gets us the first 0-1,3
836  *  - the only uncovered node is 2, whose child domain is 1-3.
837  *
838  * However, because of the overlap, computing a unique CPU for each group is
839  * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
840  * groups include the CPUs of Node-0, while those CPUs would not in fact ever
841  * end up at those groups (they would end up in group: 0-1,3).
842  *
843  * To correct this we have to introduce the group balance mask. This mask
844  * will contain those CPUs in the group that can reach this group given the
845  * (child) domain tree.
846  *
847  * With this we can once again compute balance_cpu and sched_group_capacity
848  * relations.
849  *
850  * XXX include words on how balance_cpu is unique and therefore can be
851  * used for sched_group_capacity links.
852  *
853  *
854  * Another 'interesting' topology is:
855  *
856  *   node   0   1   2   3
857  *     0:  10  20  20  30
858  *     1:  20  10  20  20
859  *     2:  20  20  10  20
860  *     3:  30  20  20  10
861  *
862  * Which looks a little like:
863  *
864  *   0 ----- 1
865  *   |     / |
866  *   |   /   |
867  *   | /     |
868  *   2 ----- 3
869  *
870  * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
871  * are not.
872  *
873  * This leads to a few particularly weird cases where the number of
874  * sched_domains is not the same for each CPU. Consider:
875  *
876  * NUMA-2	0-3						0-3
877  *  groups:	{0-2},{1-3}					{1-3},{0-2}
878  *
879  * NUMA-1	0-2		0-3		0-3		1-3
880  *
881  * NUMA-0	0		1		2		3
882  *
883  */
884 
885 
886 /*
887  * Build the balance mask; it contains only those CPUs that can arrive at this
888  * group and should be considered to continue balancing.
889  *
890  * We do this during the group creation pass, therefore the group information
891  * isn't complete yet, however since each group represents a (child) domain we
892  * can fully construct this using the sched_domain bits (which are already
893  * complete).
894  */
895 static void
896 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
897 {
898 	const struct cpumask *sg_span = sched_group_span(sg);
899 	struct sd_data *sdd = sd->private;
900 	struct sched_domain *sibling;
901 	int i;
902 
903 	cpumask_clear(mask);
904 
905 	for_each_cpu(i, sg_span) {
906 		sibling = *per_cpu_ptr(sdd->sd, i);
907 
908 		/*
909 		 * Can happen in the asymmetric case, where these siblings are
910 		 * unused. The mask will not be empty because those CPUs that
911 		 * do have the top domain _should_ span the domain.
912 		 */
913 		if (!sibling->child)
914 			continue;
915 
916 		/* If we would not end up here, we can't continue from here */
917 		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
918 			continue;
919 
920 		cpumask_set_cpu(i, mask);
921 	}
922 
923 	/* We must not have empty masks here */
924 	WARN_ON_ONCE(cpumask_empty(mask));
925 }
926 
927 /*
928  * XXX: This creates per-node group entries; since the load-balancer will
929  * immediately access remote memory to construct this group's load-balance
930  * statistics, having the groups node-local is of dubious benefit.
931  */
932 static struct sched_group *
933 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
934 {
935 	struct sched_group *sg;
936 	struct cpumask *sg_span;
937 
938 	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
939 			GFP_KERNEL, cpu_to_node(cpu));
940 
941 	if (!sg)
942 		return NULL;
943 
944 	sg_span = sched_group_span(sg);
945 	if (sd->child) {
946 		cpumask_copy(sg_span, sched_domain_span(sd->child));
947 		sg->flags = sd->child->flags;
948 	} else {
949 		cpumask_copy(sg_span, sched_domain_span(sd));
950 	}
951 
952 	atomic_inc(&sg->ref);
953 	return sg;
954 }
955 
956 static void init_overlap_sched_group(struct sched_domain *sd,
957 				     struct sched_group *sg)
958 {
959 	struct cpumask *mask = sched_domains_tmpmask2;
960 	struct sd_data *sdd = sd->private;
961 	struct cpumask *sg_span;
962 	int cpu;
963 
964 	build_balance_mask(sd, sg, mask);
965 	cpu = cpumask_first(mask);
966 
967 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
968 	if (atomic_inc_return(&sg->sgc->ref) == 1)
969 		cpumask_copy(group_balance_mask(sg), mask);
970 	else
971 		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
972 
973 	/*
974 	 * Initialize sgc->capacity such that even if we mess up the
975 	 * domains and no possible iteration will get us here, we won't
976 	 * die on a /0 trap.
977 	 */
978 	sg_span = sched_group_span(sg);
979 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
980 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
981 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
982 }
983 
984 static struct sched_domain *
985 find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
986 {
987 	/*
988 	 * The proper descendant would be the one whose child won't span out
989 	 * of sd
990 	 */
991 	while (sibling->child &&
992 	       !cpumask_subset(sched_domain_span(sibling->child),
993 			       sched_domain_span(sd)))
994 		sibling = sibling->child;
995 
996 	/*
997 	 * As we are referencing sgc across different topology levels, we need
998 	 * to go down to skip those sched_domains which don't contribute to
999 	 * scheduling because they will be degenerated in cpu_attach_domain().
1000 	 */
1001 	while (sibling->child &&
1002 	       cpumask_equal(sched_domain_span(sibling->child),
1003 			     sched_domain_span(sibling)))
1004 		sibling = sibling->child;
1005 
1006 	return sibling;
1007 }
1008 
1009 static int
1010 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
1011 {
1012 	struct sched_group *first = NULL, *last = NULL, *sg;
1013 	const struct cpumask *span = sched_domain_span(sd);
1014 	struct cpumask *covered = sched_domains_tmpmask;
1015 	struct sd_data *sdd = sd->private;
1016 	struct sched_domain *sibling;
1017 	int i;
1018 
1019 	cpumask_clear(covered);
1020 
1021 	for_each_cpu_wrap(i, span, cpu) {
1022 		struct cpumask *sg_span;
1023 
1024 		if (cpumask_test_cpu(i, covered))
1025 			continue;
1026 
1027 		sibling = *per_cpu_ptr(sdd->sd, i);
1028 
1029 		/*
1030 		 * Asymmetric node setups can result in situations where the
1031 		 * domain tree is of unequal depth, make sure to skip domains
1032 		 * that already cover the entire range.
1033 		 *
1034 		 * In that case build_sched_domains() will have terminated the
1035 		 * iteration early and our sibling sd spans will be empty.
1036 		 * Domains should always include the CPU they're built on, so
1037 		 * check that.
1038 		 */
1039 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
1040 			continue;
1041 
1042 		/*
1043 		 * Usually we build the sched_group from the sibling's child sched_domain.
1044 		 * But for machines whose NUMA diameter is 3 or above, we move to
1045 		 * building the sched_group from the sibling's proper descendant's child
1046 		 * domain because sibling's child sched_domain will span out of
1047 		 * the sched_domain being built as below.
1048 		 *
1049 		 * Smallest diameter=3 topology is:
1050 		 *
1051 		 *   node   0   1   2   3
1052 		 *     0:  10  20  30  40
1053 		 *     1:  20  10  20  30
1054 		 *     2:  30  20  10  20
1055 		 *     3:  40  30  20  10
1056 		 *
1057 		 *   0 --- 1 --- 2 --- 3
1058 		 *
1059 		 * NUMA-3       0-3             N/A             N/A             0-3
1060 		 *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
1061 		 *
1062 		 * NUMA-2       0-2             0-3             0-3             1-3
1063 		 *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
1064 		 *
1065 		 * NUMA-1       0-1             0-2             1-3             2-3
1066 		 *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
1067 		 *
1068 		 * NUMA-0       0               1               2               3
1069 		 *
1070 		 * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
1071 		 * group span isn't a subset of the domain span.
1072 		 */
1073 		if (sibling->child &&
1074 		    !cpumask_subset(sched_domain_span(sibling->child), span))
1075 			sibling = find_descended_sibling(sd, sibling);
1076 
1077 		sg = build_group_from_child_sched_domain(sibling, cpu);
1078 		if (!sg)
1079 			goto fail;
1080 
1081 		sg_span = sched_group_span(sg);
1082 		cpumask_or(covered, covered, sg_span);
1083 
1084 		init_overlap_sched_group(sibling, sg);
1085 
1086 		if (!first)
1087 			first = sg;
1088 		if (last)
1089 			last->next = sg;
1090 		last = sg;
1091 		last->next = first;
1092 	}
1093 	sd->groups = first;
1094 
1095 	return 0;
1096 
1097 fail:
1098 	free_sched_groups(first, 0);
1099 
1100 	return -ENOMEM;
1101 }
1102 
1103 
1104 /*
1105  * Package topology (also see the load-balance blurb in fair.c)
1106  *
1107  * The scheduler builds a tree structure to represent a number of important
1108  * topology features. By default (default_topology[]) these include:
1109  *
1110  *  - Simultaneous multithreading (SMT)
1111  *  - Multi-Core Cache (MC)
1112  *  - Package (DIE)
1113  *
1114  * Where the last one more or less denotes everything up to a NUMA node.
1115  *
1116  * The tree consists of 3 primary data structures:
1117  *
1118  *	sched_domain -> sched_group -> sched_group_capacity
1119  *	    ^ ^             ^ ^
1120  *          `-'             `-'
1121  *
1122  * The sched_domains are per-CPU and have a two way link (parent & child) and
1123  * denote the ever growing mask of CPUs belonging to that level of topology.
1124  *
1125  * Each sched_domain has a circular (double) linked list of sched_group's, each
1126  * denoting the domains of the level below (or individual CPUs in case of the
1127  * first domain level). The sched_group linked by a sched_domain includes the
1128  * CPU of that sched_domain [*].
1129  *
1130  * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
1131  *
1132  * CPU   0   1   2   3   4   5   6   7
1133  *
1134  * DIE  [                             ]
1135  * MC   [             ] [             ]
1136  * SMT  [     ] [     ] [     ] [     ]
1137  *
1138  *  - or -
1139  *
1140  * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1141  * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1142  * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1143  *
1144  * CPU   0   1   2   3   4   5   6   7
1145  *
1146  * One way to think about it is: sched_domain moves you up and down among these
1147  * topology levels, while sched_group moves you sideways through it, at child
1148  * domain granularity.
1149  *
1150  * sched_group_capacity ensures each unique sched_group has shared storage.
1151  *
1152  * There are two related construction problems, both of which require a CPU
1153  * that uniquely identifies each group (for a given domain):
1154  *
1155  *  - The first is the balance_cpu (see should_we_balance() and the
1156  *    load-balance blurb in fair.c); for each group we only want 1 CPU to
1157  *    continue balancing at a higher domain.
1158  *
1159  *  - The second is the sched_group_capacity; we want all identical groups
1160  *    to share a single sched_group_capacity.
1161  *
1162  * These topologies are exclusive by construction: it is impossible for an
1163  * SMT thread to belong to multiple cores, or for a core to be part of
1164  * multiple caches. There is a very clear and unique location for each CPU
1165  * in the hierarchy.
1166  *
1167  * Therefore computing a unique CPU for each group is trivial (the iteration
1168  * mask is redundant and set to all 1s; all CPUs in a group will end up at
1169  * _that_ group); we can simply pick the first CPU in each group.
1170  *
1171  *
1172  * [*] in other words, the first group of each domain is its child domain.
1173  */
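/*
 * Minimal sketch (illustrative only) of walking the structures described
 * above for one CPU: up via sd->parent using for_each_domain(), sideways
 * via sg->next, all under RCU:
 *
 *	struct sched_domain *sd;
 *	struct sched_group *sg;
 *
 *	rcu_read_lock();
 *	for_each_domain(cpu, sd) {
 *		sg = sd->groups;
 *		do {
 *			pr_info("level %d: group %*pbl\n", sd->level,
 *				cpumask_pr_args(sched_group_span(sg)));
 *			sg = sg->next;
 *		} while (sg != sd->groups);
 *	}
 *	rcu_read_unlock();
 */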
1174 
1175 static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1176 {
1177 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1178 	struct sched_domain *child = sd->child;
1179 	struct sched_group *sg;
1180 	bool already_visited;
1181 
1182 	if (child)
1183 		cpu = cpumask_first(sched_domain_span(child));
1184 
1185 	sg = *per_cpu_ptr(sdd->sg, cpu);
1186 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1187 
1188 	/* Increase refcounts for claim_allocations: */
1189 	already_visited = atomic_inc_return(&sg->ref) > 1;
1190 	/* sgc visits should follow a similar trend as sg */
1191 	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
1192 
1193 	/* If we have already visited that group, it's already initialized. */
1194 	if (already_visited)
1195 		return sg;
1196 
1197 	if (child) {
1198 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1199 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
1200 		sg->flags = child->flags;
1201 	} else {
1202 		cpumask_set_cpu(cpu, sched_group_span(sg));
1203 		cpumask_set_cpu(cpu, group_balance_mask(sg));
1204 	}
1205 
1206 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
1207 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1208 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
1209 
1210 	return sg;
1211 }
1212 
1213 /*
1214  * build_sched_groups will build a circular linked list of the groups
1215  * covered by the given span, will set each group's ->cpumask correctly,
1216  * and will initialize their ->sgc.
1217  *
1218  * Assumes the sched_domain tree is fully constructed
1219  */
1220 static int
1221 build_sched_groups(struct sched_domain *sd, int cpu)
1222 {
1223 	struct sched_group *first = NULL, *last = NULL;
1224 	struct sd_data *sdd = sd->private;
1225 	const struct cpumask *span = sched_domain_span(sd);
1226 	struct cpumask *covered;
1227 	int i;
1228 
1229 	lockdep_assert_held(&sched_domains_mutex);
1230 	covered = sched_domains_tmpmask;
1231 
1232 	cpumask_clear(covered);
1233 
1234 	for_each_cpu_wrap(i, span, cpu) {
1235 		struct sched_group *sg;
1236 
1237 		if (cpumask_test_cpu(i, covered))
1238 			continue;
1239 
1240 		sg = get_group(i, sdd);
1241 
1242 		cpumask_or(covered, covered, sched_group_span(sg));
1243 
1244 		if (!first)
1245 			first = sg;
1246 		if (last)
1247 			last->next = sg;
1248 		last = sg;
1249 	}
1250 	last->next = first;
1251 	sd->groups = first;
1252 
1253 	return 0;
1254 }
1255 
1256 /*
1257  * Initialize sched groups cpu_capacity.
1258  *
1259  * cpu_capacity indicates the capacity of a sched group, which is used while
1260  * distributing the load between different sched groups in a sched domain.
1261  * Typically cpu_capacity for all the groups in a sched domain will be the same
1262  * unless there are asymmetries in the topology. If there are asymmetries, the
1263  * group having more cpu_capacity will pick up more load compared to the
1264  * group having less cpu_capacity.
1265  */
1266 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1267 {
1268 	struct sched_group *sg = sd->groups;
1269 	struct cpumask *mask = sched_domains_tmpmask2;
1270 
1271 	WARN_ON(!sg);
1272 
1273 	do {
1274 		int cpu, cores = 0, max_cpu = -1;
1275 
1276 		sg->group_weight = cpumask_weight(sched_group_span(sg));
1277 
1278 		cpumask_copy(mask, sched_group_span(sg));
1279 		for_each_cpu(cpu, mask) {
1280 			cores++;
1281 #ifdef CONFIG_SCHED_SMT
1282 			cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
1283 #endif
1284 		}
1285 		sg->cores = cores;
1286 
1287 		if (!(sd->flags & SD_ASYM_PACKING))
1288 			goto next;
1289 
1290 		for_each_cpu(cpu, sched_group_span(sg)) {
1291 			if (max_cpu < 0)
1292 				max_cpu = cpu;
1293 			else if (sched_asym_prefer(cpu, max_cpu))
1294 				max_cpu = cpu;
1295 		}
1296 		sg->asym_prefer_cpu = max_cpu;
1297 
1298 next:
1299 		sg = sg->next;
1300 	} while (sg != sd->groups);
1301 
1302 	if (cpu != group_balance_cpu(sg))
1303 		return;
1304 
1305 	update_group_capacity(sd, cpu);
1306 }
1307 
1308 /*
1309  * Asymmetric CPU capacity bits
1310  */
1311 struct asym_cap_data {
1312 	struct list_head link;
1313 	unsigned long capacity;
1314 	unsigned long cpus[];
1315 };
1316 
1317 /*
1318  * Set of available CPUs grouped by their corresponding capacities
1319  * Each list entry contains a CPU mask reflecting CPUs that share the same
1320  * capacity.
1321  * The lifespan of data is unlimited.
1322  */
1323 static LIST_HEAD(asym_cap_list);
1324 
1325 #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus)
1326 
1327 /*
1328  * Verify whether there is any CPU capacity asymmetry in a given sched domain.
1329  * Provides sd_flags reflecting the asymmetry scope.
1330  */
1331 static inline int
1332 asym_cpu_capacity_classify(const struct cpumask *sd_span,
1333 			   const struct cpumask *cpu_map)
1334 {
1335 	struct asym_cap_data *entry;
1336 	int count = 0, miss = 0;
1337 
1338 	/*
1339 	 * Count how many unique CPU capacities this domain spans across
1340 	 * (compare the sched_domain CPU mask with the masks representing available
1341 	 * CPU capacities). Take into account CPUs that might be offline:
1342 	 * skip those.
1343 	 */
1344 	list_for_each_entry(entry, &asym_cap_list, link) {
1345 		if (cpumask_intersects(sd_span, cpu_capacity_span(entry)))
1346 			++count;
1347 		else if (cpumask_intersects(cpu_map, cpu_capacity_span(entry)))
1348 			++miss;
1349 	}
1350 
1351 	WARN_ON_ONCE(!count && !list_empty(&asym_cap_list));
1352 
1353 	/* No asymmetry detected */
1354 	if (count < 2)
1355 		return 0;
1356 	/* Some of the available CPU capacity values have not been detected */
1357 	if (miss)
1358 		return SD_ASYM_CPUCAPACITY;
1359 
1360 	/* Full asymmetry */
1361 	return SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
1362 
1363 }
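/*
 * Worked example: on a system whose CPUs have capacities 512 and 1024, a
 * domain spanning only the 512-capacity CPUs sees count == 1 and returns 0;
 * a domain spanning both sees count == 2, miss == 0 and returns
 * SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL. If a third capacity
 * existed in cpu_map but outside the domain span, miss would be non-zero
 * and only SD_ASYM_CPUCAPACITY would be returned.
 */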
1364 
1365 static inline void asym_cpu_capacity_update_data(int cpu)
1366 {
1367 	unsigned long capacity = arch_scale_cpu_capacity(cpu);
1368 	struct asym_cap_data *entry = NULL;
1369 
1370 	list_for_each_entry(entry, &asym_cap_list, link) {
1371 		if (capacity == entry->capacity)
1372 			goto done;
1373 	}
1374 
1375 	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
1376 	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
1377 		return;
1378 	entry->capacity = capacity;
1379 	list_add(&entry->link, &asym_cap_list);
1380 done:
1381 	__cpumask_set_cpu(cpu, cpu_capacity_span(entry));
1382 }
1383 
1384 /*
1385  * Build up/update the list of CPUs grouped by their capacities.
1386  * An update requires an explicit request to rebuild the sched domains,
1387  * with state indicating CPU topology changes.
1388  */
1389 static void asym_cpu_capacity_scan(void)
1390 {
1391 	struct asym_cap_data *entry, *next;
1392 	int cpu;
1393 
1394 	list_for_each_entry(entry, &asym_cap_list, link)
1395 		cpumask_clear(cpu_capacity_span(entry));
1396 
1397 	for_each_cpu_and(cpu, cpu_possible_mask, housekeeping_cpumask(HK_TYPE_DOMAIN))
1398 		asym_cpu_capacity_update_data(cpu);
1399 
1400 	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
1401 		if (cpumask_empty(cpu_capacity_span(entry))) {
1402 			list_del(&entry->link);
1403 			kfree(entry);
1404 		}
1405 	}
1406 
1407 	/*
1408 	 * Only one capacity value has been detected, i.e. this system is symmetric.
1409 	 * No need to keep this data around.
1410 	 */
1411 	if (list_is_singular(&asym_cap_list)) {
1412 		entry = list_first_entry(&asym_cap_list, typeof(*entry), link);
1413 		list_del(&entry->link);
1414 		kfree(entry);
1415 	}
1416 }
1417 
1418 /*
1419  * Initializers for sched domains.
1420  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1421  */
1422 
1423 static int default_relax_domain_level = -1;
1424 int sched_domain_level_max;
1425 
1426 static int __init setup_relax_domain_level(char *str)
1427 {
1428 	if (kstrtoint(str, 0, &default_relax_domain_level))
1429 		pr_warn("Unable to set relax_domain_level\n");
1430 
1431 	return 1;
1432 }
1433 __setup("relax_domain_level=", setup_relax_domain_level);
1434 
1435 static void set_domain_attribute(struct sched_domain *sd,
1436 				 struct sched_domain_attr *attr)
1437 {
1438 	int request;
1439 
1440 	if (!attr || attr->relax_domain_level < 0) {
1441 		if (default_relax_domain_level < 0)
1442 			return;
1443 		request = default_relax_domain_level;
1444 	} else
1445 		request = attr->relax_domain_level;
1446 
1447 	if (sd->level >= request) {
1448 		/* Turn off idle balance on this domain: */
1449 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1450 	}
1451 }
1452 
1453 static void __sdt_free(const struct cpumask *cpu_map);
1454 static int __sdt_alloc(const struct cpumask *cpu_map);
1455 
1456 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
1457 				 const struct cpumask *cpu_map)
1458 {
1459 	switch (what) {
1460 	case sa_rootdomain:
1461 		if (!atomic_read(&d->rd->refcount))
1462 			free_rootdomain(&d->rd->rcu);
1463 		fallthrough;
1464 	case sa_sd:
1465 		free_percpu(d->sd);
1466 		fallthrough;
1467 	case sa_sd_storage:
1468 		__sdt_free(cpu_map);
1469 		fallthrough;
1470 	case sa_none:
1471 		break;
1472 	}
1473 }
1474 
1475 static enum s_alloc
1476 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
1477 {
1478 	memset(d, 0, sizeof(*d));
1479 
1480 	if (__sdt_alloc(cpu_map))
1481 		return sa_sd_storage;
1482 	d->sd = alloc_percpu(struct sched_domain *);
1483 	if (!d->sd)
1484 		return sa_sd_storage;
1485 	d->rd = alloc_rootdomain();
1486 	if (!d->rd)
1487 		return sa_sd;
1488 
1489 	return sa_rootdomain;
1490 }
1491 
1492 /*
1493  * NULL the sd_data elements we've used to build the sched_domain and
1494  * sched_group structure so that the subsequent __free_domain_allocs()
1495  * will not free the data we're using.
1496  */
1497 static void claim_allocations(int cpu, struct sched_domain *sd)
1498 {
1499 	struct sd_data *sdd = sd->private;
1500 
1501 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1502 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
1503 
1504 	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
1505 		*per_cpu_ptr(sdd->sds, cpu) = NULL;
1506 
1507 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
1508 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
1509 
1510 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
1511 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
1512 }
1513 
1514 #ifdef CONFIG_NUMA
1515 enum numa_topology_type sched_numa_topology_type;
1516 
1517 static int			sched_domains_numa_levels;
1518 static int			sched_domains_curr_level;
1519 
1520 int				sched_max_numa_distance;
1521 static int			*sched_domains_numa_distance;
1522 static struct cpumask		***sched_domains_numa_masks;
1523 #endif
1524 
1525 /*
1526  * SD_flags allowed in topology descriptions.
1527  *
1528  * These flags are purely descriptive of the topology and do not prescribe
1529  * behaviour. Behaviour is artificial and mapped in the below sd_init()
1530  * function:
1531  *
1532  *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
1533  *   SD_SHARE_PKG_RESOURCES - describes shared caches
1534  *   SD_NUMA                - describes NUMA topologies
1535  *
1536  * Odd one out, which beside describing the topology has a quirk also
1537  * prescribes the desired behaviour that goes along with it:
1538  *
1539  *   SD_ASYM_PACKING        - describes SMT quirks
1540  */
1541 #define TOPOLOGY_SD_FLAGS		\
1542 	(SD_SHARE_CPUCAPACITY	|	\
1543 	 SD_SHARE_PKG_RESOURCES |	\
1544 	 SD_NUMA		|	\
1545 	 SD_ASYM_PACKING)
1546 
1547 static struct sched_domain *
1548 sd_init(struct sched_domain_topology_level *tl,
1549 	const struct cpumask *cpu_map,
1550 	struct sched_domain *child, int cpu)
1551 {
1552 	struct sd_data *sdd = &tl->data;
1553 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1554 	int sd_id, sd_weight, sd_flags = 0;
1555 	struct cpumask *sd_span;
1556 
1557 #ifdef CONFIG_NUMA
1558 	/*
1559 	 * Ugly hack to pass state to sd_numa_mask()...
1560 	 */
1561 	sched_domains_curr_level = tl->numa_level;
1562 #endif
1563 
1564 	sd_weight = cpumask_weight(tl->mask(cpu));
1565 
1566 	if (tl->sd_flags)
1567 		sd_flags = (*tl->sd_flags)();
1568 	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1569 			"wrong sd_flags in topology description\n"))
1570 		sd_flags &= TOPOLOGY_SD_FLAGS;
1571 
1572 	*sd = (struct sched_domain){
1573 		.min_interval		= sd_weight,
1574 		.max_interval		= 2*sd_weight,
1575 		.busy_factor		= 16,
1576 		.imbalance_pct		= 117,
1577 
1578 		.cache_nice_tries	= 0,
1579 
1580 		.flags			= 1*SD_BALANCE_NEWIDLE
1581 					| 1*SD_BALANCE_EXEC
1582 					| 1*SD_BALANCE_FORK
1583 					| 0*SD_BALANCE_WAKE
1584 					| 1*SD_WAKE_AFFINE
1585 					| 0*SD_SHARE_CPUCAPACITY
1586 					| 0*SD_SHARE_PKG_RESOURCES
1587 					| 0*SD_SERIALIZE
1588 					| 1*SD_PREFER_SIBLING
1589 					| 0*SD_NUMA
1590 					| sd_flags
1591 					,
1592 
1593 		.last_balance		= jiffies,
1594 		.balance_interval	= sd_weight,
1595 		.max_newidle_lb_cost	= 0,
1596 		.last_decay_max_lb_cost	= jiffies,
1597 		.child			= child,
1598 #ifdef CONFIG_SCHED_DEBUG
1599 		.name			= tl->name,
1600 #endif
1601 	};
1602 
1603 	sd_span = sched_domain_span(sd);
1604 	cpumask_and(sd_span, cpu_map, tl->mask(cpu));
1605 	sd_id = cpumask_first(sd_span);
1606 
1607 	sd->flags |= asym_cpu_capacity_classify(sd_span, cpu_map);
1608 
1609 	WARN_ONCE((sd->flags & (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY)) ==
1610 		  (SD_SHARE_CPUCAPACITY | SD_ASYM_CPUCAPACITY),
1611 		  "CPU capacity asymmetry not supported on SMT\n");
1612 
1613 	/*
1614 	 * Convert topological properties into behaviour.
1615 	 */
1616 	/* Don't attempt to spread across CPUs of different capacities. */
1617 	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
1618 		sd->child->flags &= ~SD_PREFER_SIBLING;
1619 
1620 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
1621 		sd->imbalance_pct = 110;
1622 
1623 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1624 		sd->imbalance_pct = 117;
1625 		sd->cache_nice_tries = 1;
1626 
1627 #ifdef CONFIG_NUMA
1628 	} else if (sd->flags & SD_NUMA) {
1629 		sd->cache_nice_tries = 2;
1630 
1631 		sd->flags &= ~SD_PREFER_SIBLING;
1632 		sd->flags |= SD_SERIALIZE;
1633 		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
1634 			sd->flags &= ~(SD_BALANCE_EXEC |
1635 				       SD_BALANCE_FORK |
1636 				       SD_WAKE_AFFINE);
1637 		}
1638 
1639 #endif
1640 	} else {
1641 		sd->cache_nice_tries = 1;
1642 	}
1643 
1644 	/*
1645 	 * For all levels sharing cache; connect a sched_domain_shared
1646 	 * instance.
1647 	 */
1648 	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1649 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1650 		atomic_inc(&sd->shared->ref);
1651 		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1652 	}
1653 
1654 	sd->private = sdd;
1655 
1656 	return sd;
1657 }
1658 
1659 /*
1660  * Topology list, bottom-up.
1661  */
1662 static struct sched_domain_topology_level default_topology[] = {
1663 #ifdef CONFIG_SCHED_SMT
1664 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1665 #endif
1666 
1667 #ifdef CONFIG_SCHED_CLUSTER
1668 	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
1669 #endif
1670 
1671 #ifdef CONFIG_SCHED_MC
1672 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1673 #endif
1674 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1675 	{ NULL, },
1676 };
1677 
1678 static struct sched_domain_topology_level *sched_domain_topology =
1679 	default_topology;
1680 static struct sched_domain_topology_level *sched_domain_topology_saved;
1681 
1682 #define for_each_sd_topology(tl)			\
1683 	for (tl = sched_domain_topology; tl->mask; tl++)
1684 
1685 void __init set_sched_topology(struct sched_domain_topology_level *tl)
1686 {
1687 	if (WARN_ON_ONCE(sched_smp_initialized))
1688 		return;
1689 
1690 	sched_domain_topology = tl;
1691 	sched_domain_topology_saved = NULL;
1692 }
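/*
 * Illustrative use (hypothetical arch code, names made up): an architecture
 * can install its own table before SMP bring-up, mirroring
 * default_topology[] above:
 *
 *	static struct sched_domain_topology_level my_arch_topology[] = {
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_arch_topology);
 *
 * Several architectures do exactly this with their own tables.
 */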
1693 
1694 #ifdef CONFIG_NUMA
1695 
1696 static const struct cpumask *sd_numa_mask(int cpu)
1697 {
1698 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1699 }
1700 
1701 static void sched_numa_warn(const char *str)
1702 {
1703 	static int done = false;
1704 	int i,j;
1705 
1706 	if (done)
1707 		return;
1708 
1709 	done = true;
1710 
1711 	printk(KERN_WARNING "ERROR: %s\n\n", str);
1712 
1713 	for (i = 0; i < nr_node_ids; i++) {
1714 		printk(KERN_WARNING "  ");
1715 		for (j = 0; j < nr_node_ids; j++) {
1716 			if (!node_state(i, N_CPU) || !node_state(j, N_CPU))
1717 				printk(KERN_CONT "(%02d) ", node_distance(i,j));
1718 			else
1719 				printk(KERN_CONT " %02d  ", node_distance(i,j));
1720 		}
1721 		printk(KERN_CONT "\n");
1722 	}
1723 	printk(KERN_WARNING "\n");
1724 }
1725 
1726 bool find_numa_distance(int distance)
1727 {
1728 	bool found = false;
1729 	int i, *distances;
1730 
1731 	if (distance == node_distance(0, 0))
1732 		return true;
1733 
1734 	rcu_read_lock();
1735 	distances = rcu_dereference(sched_domains_numa_distance);
1736 	if (!distances)
1737 		goto unlock;
1738 	for (i = 0; i < sched_domains_numa_levels; i++) {
1739 		if (distances[i] == distance) {
1740 			found = true;
1741 			break;
1742 		}
1743 	}
1744 unlock:
1745 	rcu_read_unlock();
1746 
1747 	return found;
1748 }
1749 
1750 #define for_each_cpu_node_but(n, nbut)		\
1751 	for_each_node_state(n, N_CPU)		\
1752 		if (n == nbut)			\
1753 			continue;		\
1754 		else
1755 
1756 /*
1757  * A system can have three types of NUMA topology:
1758  * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
1759  * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
1760  * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
1761  *
1762  * The difference between a glueless mesh topology and a backplane
1763  * topology lies in whether communication between not directly
1764  * connected nodes goes through intermediary nodes (where programs
1765  * could run), or through backplane controllers. This affects
1766  * placement of programs.
1767  *
1768  * The type of topology can be discerned with the following tests:
1769  * - If the maximum distance between any nodes is 1 hop, the system
1770  *   is directly connected.
1771  * - If for two nodes A and B, located N > 1 hops away from each other,
1772  *   there is an intermediary node C, which is < N hops away from both
1773  *   nodes A and B, the system is a glueless mesh.
1774  */
1775 static void init_numa_topology_type(int offline_node)
1776 {
1777 	int a, b, c, n;
1778 
1779 	n = sched_max_numa_distance;
1780 
1781 	if (sched_domains_numa_levels <= 2) {
1782 		sched_numa_topology_type = NUMA_DIRECT;
1783 		return;
1784 	}
1785 
1786 	for_each_cpu_node_but(a, offline_node) {
1787 		for_each_cpu_node_but(b, offline_node) {
1788 			/* Find two nodes furthest removed from each other. */
1789 			if (node_distance(a, b) < n)
1790 				continue;
1791 
1792 			/* Is there an intermediary node between a and b? */
1793 			for_each_cpu_node_but(c, offline_node) {
1794 				if (node_distance(a, c) < n &&
1795 				    node_distance(b, c) < n) {
1796 					sched_numa_topology_type =
1797 							NUMA_GLUELESS_MESH;
1798 					return;
1799 				}
1800 			}
1801 
1802 			sched_numa_topology_type = NUMA_BACKPLANE;
1803 			return;
1804 		}
1805 	}
1806 
1807 	pr_err("Failed to find a NUMA topology type, defaulting to DIRECT\n");
1808 	sched_numa_topology_type = NUMA_DIRECT;
1809 }
1810 
1811 
1812 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
1813 
1814 void sched_init_numa(int offline_node)
1815 {
1816 	struct sched_domain_topology_level *tl;
1817 	unsigned long *distance_map;
1818 	int nr_levels = 0;
1819 	int i, j;
1820 	int *distances;
1821 	struct cpumask ***masks;
1822 
1823 	/*
1824 	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
1825 	 * unique distances in the node_distance() table.
1826 	 */
1827 	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
1828 	if (!distance_map)
1829 		return;
1830 
1831 	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
1832 	for_each_cpu_node_but(i, offline_node) {
1833 		for_each_cpu_node_but(j, offline_node) {
1834 			int distance = node_distance(i, j);
1835 
1836 			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
1837 				sched_numa_warn("Invalid distance value range");
1838 				bitmap_free(distance_map);
1839 				return;
1840 			}
1841 
1842 			bitmap_set(distance_map, distance, 1);
1843 		}
1844 	}
1845 	/*
1846 	 * We can now figure out how many unique distance values there are and
1847 	 * allocate memory accordingly.
1848 	 */
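	/* E.g. a two-socket system with distances { 10, 21 } ends up with nr_levels == 2. */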
1849 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
1850 
1851 	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
1852 	if (!distances) {
1853 		bitmap_free(distance_map);
1854 		return;
1855 	}
1856 
1857 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
1858 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
1859 		distances[i] = j;
1860 	}
1861 	rcu_assign_pointer(sched_domains_numa_distance, distances);
1862 
1863 	bitmap_free(distance_map);
1864 
1865 	/*
1866 	 * 'nr_levels' contains the number of unique distances
1867 	 *
1868 	 * The sched_domains_numa_distance[] array includes the actual distance
1869 	 * numbers.
1870 	 */
1871 
1872 	/*
1873 	 * Temporarily reset sched_domains_numa_levels to 0 here.
1874 	 * If allocating the sched_domains_numa_masks[][] array fails below,
1875 	 * the array would contain fewer than 'nr_levels' members. That would be
1876 	 * dangerous for code that iterates over sched_domains_numa_masks[][]
1877 	 * in other functions.
1878 	 *
1879 	 * We reset it to 'nr_levels' at the end of this function.
1880 	 */
1881 	sched_domains_numa_levels = 0;
1882 
1883 	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
1884 	if (!masks)
1885 		return;
1886 
1887 	/*
1888 	 * Now for each level, construct a mask per node which contains all
1889 	 * CPUs of nodes that are that many hops away from us.
1890 	 */
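	/*
	 * Level 0 uses the local distance, so masks[0][node] covers just that
	 * node's own CPUs; the highest level covers every node that has CPUs.
	 */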
1891 	for (i = 0; i < nr_levels; i++) {
1892 		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
1893 		if (!masks[i])
1894 			return;
1895 
1896 		for_each_cpu_node_but(j, offline_node) {
1897 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1898 			int k;
1899 
1900 			if (!mask)
1901 				return;
1902 
1903 			masks[i][j] = mask;
1904 
1905 			for_each_cpu_node_but(k, offline_node) {
1906 				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
1907 					sched_numa_warn("Node-distance not symmetric");
1908 
1909 				if (node_distance(j, k) > sched_domains_numa_distance[i])
1910 					continue;
1911 
1912 				cpumask_or(mask, mask, cpumask_of_node(k));
1913 			}
1914 		}
1915 	}
1916 	rcu_assign_pointer(sched_domains_numa_masks, masks);
1917 
1918 	/* Compute default topology size */
1919 	for (i = 0; sched_domain_topology[i].mask; i++);
1920 
1921 	tl = kzalloc((i + nr_levels + 1) *
1922 			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
1923 	if (!tl)
1924 		return;
1925 
1926 	/*
1927 	 * Copy the default topology bits..
1928 	 */
1929 	for (i = 0; sched_domain_topology[i].mask; i++)
1930 		tl[i] = sched_domain_topology[i];
1931 
1932 	/*
1933 	 * Add the NUMA identity distance, aka single NODE.
1934 	 */
1935 	tl[i++] = (struct sched_domain_topology_level){
1936 		.mask = sd_numa_mask,
1937 		.numa_level = 0,
1938 		SD_INIT_NAME(NODE)
1939 	};
1940 
1941 	/*
1942 	 * .. and append 'j' levels of NUMA goodness.
1943 	 */
1944 	for (j = 1; j < nr_levels; i++, j++) {
1945 		tl[i] = (struct sched_domain_topology_level){
1946 			.mask = sd_numa_mask,
1947 			.sd_flags = cpu_numa_flags,
1948 			.flags = SDTL_OVERLAP,
1949 			.numa_level = j,
1950 			SD_INIT_NAME(NUMA)
1951 		};
1952 	}
1953 
1954 	sched_domain_topology_saved = sched_domain_topology;
1955 	sched_domain_topology = tl;
1956 
1957 	sched_domains_numa_levels = nr_levels;
1958 	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
1959 
1960 	init_numa_topology_type(offline_node);
1961 }
1962 
1963 
1964 static void sched_reset_numa(void)
1965 {
1966 	int nr_levels, *distances;
1967 	struct cpumask ***masks;
1968 
1969 	nr_levels = sched_domains_numa_levels;
1970 	sched_domains_numa_levels = 0;
1971 	sched_max_numa_distance = 0;
1972 	sched_numa_topology_type = NUMA_DIRECT;
1973 	distances = sched_domains_numa_distance;
1974 	rcu_assign_pointer(sched_domains_numa_distance, NULL);
1975 	masks = sched_domains_numa_masks;
1976 	rcu_assign_pointer(sched_domains_numa_masks, NULL);
1977 	if (distances || masks) {
1978 		int i, j;
1979 
1980 		synchronize_rcu();
1981 		kfree(distances);
1982 		for (i = 0; i < nr_levels && masks; i++) {
1983 			if (!masks[i])
1984 				continue;
1985 			for_each_node(j)
1986 				kfree(masks[i][j]);
1987 			kfree(masks[i]);
1988 		}
1989 		kfree(masks);
1990 	}
1991 	if (sched_domain_topology_saved) {
1992 		kfree(sched_domain_topology);
1993 		sched_domain_topology = sched_domain_topology_saved;
1994 		sched_domain_topology_saved = NULL;
1995 	}
1996 }
1997 
1998 /*
1999  * Call with hotplug lock held
2000  */
2001 void sched_update_numa(int cpu, bool online)
2002 {
2003 	int node;
2004 
2005 	node = cpu_to_node(cpu);
2006 	/*
2007 	 * Scheduler NUMA topology is updated when the first CPU of a
2008 	 * node is onlined or the last CPU of a node is offlined.
2009 	 */
2010 	if (cpumask_weight(cpumask_of_node(node)) != 1)
2011 		return;
2012 
2013 	sched_reset_numa();
2014 	sched_init_numa(online ? NUMA_NO_NODE : node);
2015 }
2016 
2017 void sched_domains_numa_masks_set(unsigned int cpu)
2018 {
2019 	int node = cpu_to_node(cpu);
2020 	int i, j;
2021 
2022 	for (i = 0; i < sched_domains_numa_levels; i++) {
2023 		for (j = 0; j < nr_node_ids; j++) {
2024 			if (!node_state(j, N_CPU))
2025 				continue;
2026 
2027 			/* Set ourselves in the remote node's masks */
2028 			if (node_distance(j, node) <= sched_domains_numa_distance[i])
2029 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
2030 		}
2031 	}
2032 }
2033 
2034 void sched_domains_numa_masks_clear(unsigned int cpu)
2035 {
2036 	int i, j;
2037 
2038 	for (i = 0; i < sched_domains_numa_levels; i++) {
2039 		for (j = 0; j < nr_node_ids; j++) {
2040 			if (sched_domains_numa_masks[i][j])
2041 				cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
2042 		}
2043 	}
2044 }
2045 
2046 /*
2047  * sched_numa_find_closest() - given the NUMA topology, find the CPU in
2048  *                             @cpus closest to @cpu.
2049  * @cpus: cpumask to pick a CPU from
2050  * @cpu: CPU to be close to
2051  *
2052  * Return: the picked CPU, or nr_cpu_ids when nothing is found.
2053  */
2054 int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
2055 {
2056 	int i, j = cpu_to_node(cpu), found = nr_cpu_ids;
2057 	struct cpumask ***masks;
2058 
2059 	rcu_read_lock();
2060 	masks = rcu_dereference(sched_domains_numa_masks);
2061 	if (!masks)
2062 		goto unlock;
2063 	for (i = 0; i < sched_domains_numa_levels; i++) {
2064 		if (!masks[i][j])
2065 			break;
2066 		cpu = cpumask_any_and(cpus, masks[i][j]);
2067 		if (cpu < nr_cpu_ids) {
2068 			found = cpu;
2069 			break;
2070 		}
2071 	}
2072 unlock:
2073 	rcu_read_unlock();
2074 
2075 	return found;
2076 }
2077 
2078 struct __cmp_key {
2079 	const struct cpumask *cpus;
2080 	struct cpumask ***masks;
2081 	int node;
2082 	int cpu;
2083 	int w;
2084 };
2085 
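/*
 * bsearch() comparator used by sched_numa_find_nth_cpu(): find the first hop
 * level whose (cumulative) mask for k->node contains more than k->cpu CPUs
 * from k->cpus.  On a match, k->w is left holding how many of those CPUs are
 * already covered by the previous hop, so the caller can index into the CPUs
 * that this hop adds.
 */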
2086 static int hop_cmp(const void *a, const void *b)
2087 {
2088 	struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
2089 	struct __cmp_key *k = (struct __cmp_key *)a;
2090 
2091 	if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
2092 		return 1;
2093 
2094 	if (b == k->masks) {
2095 		k->w = 0;
2096 		return 0;
2097 	}
2098 
2099 	prev_hop = *((struct cpumask ***)b - 1);
2100 	k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
2101 	if (k->w <= k->cpu)
2102 		return 0;
2103 
2104 	return -1;
2105 }
2106 
2107 /*
2108  * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth CPU in
2109  *                             @cpus, ordered by increasing NUMA distance from @node.
2110  * @cpus: cpumask to pick a CPU from
2111  * @cpu: index (N) of the CPU to find
2112  *
2113  * Return: the picked CPU, or nr_cpu_ids when nothing is found.
2114  */
2115 int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
2116 {
2117 	struct __cmp_key k = { .cpus = cpus, .cpu = cpu };
2118 	struct cpumask ***hop_masks;
2119 	int hop, ret = nr_cpu_ids;
2120 
2121 	if (node == NUMA_NO_NODE)
2122 		return cpumask_nth_and(cpu, cpus, cpu_online_mask);
2123 
2124 	rcu_read_lock();
2125 
2126 	/* CPU-less node entries are uninitialized in sched_domains_numa_masks */
2127 	node = numa_nearest_node(node, N_CPU);
2128 	k.node = node;
2129 
2130 	k.masks = rcu_dereference(sched_domains_numa_masks);
2131 	if (!k.masks)
2132 		goto unlock;
2133 
2134 	hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
2135 	hop = hop_masks - k.masks;
2136 
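	/*
	 * @hop is the first level whose cumulative mask holds more than @cpu
	 * matching CPUs; k.w of them lie in the previous level, so pick the
	 * (cpu - k.w)-th CPU among those this hop adds (hop 0 has no
	 * predecessor and is indexed directly).
	 */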
2137 	ret = hop ?
2138 		cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
2139 		cpumask_nth_and(cpu, cpus, k.masks[0][node]);
2140 unlock:
2141 	rcu_read_unlock();
2142 	return ret;
2143 }
2144 EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
2145 
2146 /**
2147  * sched_numa_hop_mask() - Get the cpumask of CPUs at most @hops hops away from
2148  *                         @node
2149  * @node: The node to count hops from.
2150  * @hops: Include CPUs up to that many hops away. 0 means local node.
2151  *
2152  * Return: On success, a pointer to a cpumask of CPUs at most @hops away from
2153  * @node, an error value otherwise.
2154  *
2155  * Requires rcu_lock to be held. Returned cpumask is only valid within that
2156  * read-side section, copy it if required beyond that.
2157  *
2158  * Note that not all hops are equal in distance; see sched_init_numa() for how
2159  * distances and masks are handled.
2160  * Also note that this is a reflection of sched_domains_numa_masks, which may change
2161  * during the lifetime of the system (offline nodes are taken out of the masks).
2162  * during the lifetime of the system (offline nodes are taken out of the masks).
 */
2163 const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops)
2164 {
2165 	struct cpumask ***masks;
2166 
2167 	if (node >= nr_node_ids || hops >= sched_domains_numa_levels)
2168 		return ERR_PTR(-EINVAL);
2169 
2170 	masks = rcu_dereference(sched_domains_numa_masks);
2171 	if (!masks)
2172 		return ERR_PTR(-EBUSY);
2173 
2174 	return masks[hops][node];
2175 }
2176 EXPORT_SYMBOL_GPL(sched_numa_hop_mask);
2177 
2178 #endif /* CONFIG_NUMA */
2179 
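/*
 * Allocate the per-CPU sched_domain, shared, group and group-capacity
 * structures for every topology level; __sdt_free() below undoes this.
 */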
2180 static int __sdt_alloc(const struct cpumask *cpu_map)
2181 {
2182 	struct sched_domain_topology_level *tl;
2183 	int j;
2184 
2185 	for_each_sd_topology(tl) {
2186 		struct sd_data *sdd = &tl->data;
2187 
2188 		sdd->sd = alloc_percpu(struct sched_domain *);
2189 		if (!sdd->sd)
2190 			return -ENOMEM;
2191 
2192 		sdd->sds = alloc_percpu(struct sched_domain_shared *);
2193 		if (!sdd->sds)
2194 			return -ENOMEM;
2195 
2196 		sdd->sg = alloc_percpu(struct sched_group *);
2197 		if (!sdd->sg)
2198 			return -ENOMEM;
2199 
2200 		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
2201 		if (!sdd->sgc)
2202 			return -ENOMEM;
2203 
2204 		for_each_cpu(j, cpu_map) {
2205 			struct sched_domain *sd;
2206 			struct sched_domain_shared *sds;
2207 			struct sched_group *sg;
2208 			struct sched_group_capacity *sgc;
2209 
2210 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
2211 					GFP_KERNEL, cpu_to_node(j));
2212 			if (!sd)
2213 				return -ENOMEM;
2214 
2215 			*per_cpu_ptr(sdd->sd, j) = sd;
2216 
2217 			sds = kzalloc_node(sizeof(struct sched_domain_shared),
2218 					GFP_KERNEL, cpu_to_node(j));
2219 			if (!sds)
2220 				return -ENOMEM;
2221 
2222 			*per_cpu_ptr(sdd->sds, j) = sds;
2223 
2224 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
2225 					GFP_KERNEL, cpu_to_node(j));
2226 			if (!sg)
2227 				return -ENOMEM;
2228 
2229 			sg->next = sg;
2230 
2231 			*per_cpu_ptr(sdd->sg, j) = sg;
2232 
2233 			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
2234 					GFP_KERNEL, cpu_to_node(j));
2235 			if (!sgc)
2236 				return -ENOMEM;
2237 
2238 #ifdef CONFIG_SCHED_DEBUG
2239 			sgc->id = j;
2240 #endif
2241 
2242 			*per_cpu_ptr(sdd->sgc, j) = sgc;
2243 		}
2244 	}
2245 
2246 	return 0;
2247 }
2248 
2249 static void __sdt_free(const struct cpumask *cpu_map)
2250 {
2251 	struct sched_domain_topology_level *tl;
2252 	int j;
2253 
2254 	for_each_sd_topology(tl) {
2255 		struct sd_data *sdd = &tl->data;
2256 
2257 		for_each_cpu(j, cpu_map) {
2258 			struct sched_domain *sd;
2259 
2260 			if (sdd->sd) {
2261 				sd = *per_cpu_ptr(sdd->sd, j);
2262 				if (sd && (sd->flags & SD_OVERLAP))
2263 					free_sched_groups(sd->groups, 0);
2264 				kfree(*per_cpu_ptr(sdd->sd, j));
2265 			}
2266 
2267 			if (sdd->sds)
2268 				kfree(*per_cpu_ptr(sdd->sds, j));
2269 			if (sdd->sg)
2270 				kfree(*per_cpu_ptr(sdd->sg, j));
2271 			if (sdd->sgc)
2272 				kfree(*per_cpu_ptr(sdd->sgc, j));
2273 		}
2274 		free_percpu(sdd->sd);
2275 		sdd->sd = NULL;
2276 		free_percpu(sdd->sds);
2277 		sdd->sds = NULL;
2278 		free_percpu(sdd->sg);
2279 		sdd->sg = NULL;
2280 		free_percpu(sdd->sgc);
2281 		sdd->sgc = NULL;
2282 	}
2283 }
2284 
2285 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
2286 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
2287 		struct sched_domain *child, int cpu)
2288 {
2289 	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
2290 
2291 	if (child) {
2292 		sd->level = child->level + 1;
2293 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
2294 		child->parent = sd;
2295 
2296 		if (!cpumask_subset(sched_domain_span(child),
2297 				    sched_domain_span(sd))) {
2298 			pr_err("BUG: arch topology borken\n");
2299 #ifdef CONFIG_SCHED_DEBUG
2300 			pr_err("     the %s domain not a subset of the %s domain\n",
2301 					child->name, sd->name);
2302 #endif
2303 			/* Fixup: ensure @sd spans at least the CPUs of @child. */
2304 			cpumask_or(sched_domain_span(sd),
2305 				   sched_domain_span(sd),
2306 				   sched_domain_span(child));
2307 		}
2308 
2309 	}
2310 	set_domain_attribute(sd, attr);
2311 
2312 	return sd;
2313 }
2314 
2315 /*
2316  * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
2317  * any two given CPUs at this (non-NUMA) topology level.
2318  */
2319 static bool topology_span_sane(struct sched_domain_topology_level *tl,
2320 			      const struct cpumask *cpu_map, int cpu)
2321 {
2322 	int i;
2323 
2324 	/* NUMA levels are allowed to overlap */
2325 	if (tl->flags & SDTL_OVERLAP)
2326 		return true;
2327 
2328 	/*
2329 	 * Non-NUMA levels cannot partially overlap - they must be either
2330 	 * completely equal or completely disjoint. Otherwise we can end up
2331 	 * breaking the sched_group lists - i.e. a later get_group() pass
2332 	 * breaks the linking done for an earlier span.
2333 	 */
2334 	for_each_cpu(i, cpu_map) {
2335 		if (i == cpu)
2336 			continue;
2337 		/*
2338 		 * We should 'and' all those masks with 'cpu_map' to exactly
2339 		 * match the topology we're about to build, but that can only
2340 		 * remove CPUs, which only lessens our ability to detect
2341 		 * overlaps
2342 		 */
2343 		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
2344 		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
2345 			return false;
2346 	}
2347 
2348 	return true;
2349 }
2350 
2351 /*
2352  * Build sched domains for a given set of CPUs and attach the sched domains
2353  * to the individual CPUs
2354  */
2355 static int
2356 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
2357 {
2358 	enum s_alloc alloc_state = sa_none;
2359 	struct sched_domain *sd;
2360 	struct s_data d;
2361 	struct rq *rq = NULL;
2362 	int i, ret = -ENOMEM;
2363 	bool has_asym = false;
2364 
2365 	if (WARN_ON(cpumask_empty(cpu_map)))
2366 		goto error;
2367 
2368 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
2369 	if (alloc_state != sa_rootdomain)
2370 		goto error;
2371 
2372 	/* Set up domains for CPUs specified by the cpu_map: */
2373 	for_each_cpu(i, cpu_map) {
2374 		struct sched_domain_topology_level *tl;
2375 
2376 		sd = NULL;
2377 		for_each_sd_topology(tl) {
2378 
2379 			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
2380 				goto error;
2381 
2382 			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
2383 
2384 			has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
2385 
2386 			if (tl == sched_domain_topology)
2387 				*per_cpu_ptr(d.sd, i) = sd;
2388 			if (tl->flags & SDTL_OVERLAP)
2389 				sd->flags |= SD_OVERLAP;
2390 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
2391 				break;
2392 		}
2393 	}
2394 
2395 	/* Build the groups for the domains */
2396 	for_each_cpu(i, cpu_map) {
2397 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2398 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
2399 			if (sd->flags & SD_OVERLAP) {
2400 				if (build_overlap_sched_groups(sd, i))
2401 					goto error;
2402 			} else {
2403 				if (build_sched_groups(sd, i))
2404 					goto error;
2405 			}
2406 		}
2407 	}
2408 
2409 	/*
2410 	 * Calculate an allowed NUMA imbalance such that LLCs do not get
2411 	 * imbalanced.
2412 	 */
2413 	for_each_cpu(i, cpu_map) {
2414 		unsigned int imb = 0;
2415 		unsigned int imb_span = 1;
2416 
2417 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2418 			struct sched_domain *child = sd->child;
2419 
2420 			if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
2421 			    (child->flags & SD_SHARE_PKG_RESOURCES)) {
2422 				struct sched_domain __rcu *top_p;
2423 				unsigned int nr_llcs;
2424 
2425 				/*
2426 				 * For a single LLC per node, allow an
2427 				 * imbalance up to 12.5% of the node. This is an
2428 				 * arbitrary cutoff based on two factors -- SMT and
2429 				 * memory channels. For SMT-2, the intent is to
2430 				 * avoid premature sharing of HT resources but
2431 				 * SMT-4 or SMT-8 *may* benefit from a different
2432 				 * cutoff. For memory channels, this is a very
2433 				 * rough estimate of how many channels may be
2434 				 * active and is based on recent CPUs with
2435 				 * many cores.
2436 				 *
2437 				 * For multiple LLCs, allow an imbalance
2438 				 * until multiple tasks would share an LLC
2439 				 * on one node while LLCs on another node
2440 				 * remain idle. This assumes that there are
2441 				 * enough logical CPUs per LLC to avoid SMT
2442 				 * factors and that there is a correlation
2443 				 * between LLCs and memory channels.
2444 				 */
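				/*
				 * E.g. a 16-CPU node with one LLC gets
				 * imb = 2; a node split into four LLCs
				 * gets imb = 4.
				 */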
2445 				nr_llcs = sd->span_weight / child->span_weight;
2446 				if (nr_llcs == 1)
2447 					imb = sd->span_weight >> 3;
2448 				else
2449 					imb = nr_llcs;
2450 				imb = max(1U, imb);
2451 				sd->imb_numa_nr = imb;
2452 
2453 				/* Set span based on the first NUMA domain. */
2454 				top_p = sd->parent;
2455 				while (top_p && !(top_p->flags & SD_NUMA)) {
2456 					top_p = top_p->parent;
2457 				}
2458 				imb_span = top_p ? top_p->span_weight : sd->span_weight;
2459 			} else {
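				/*
				 * Levels below the LLC boundary keep imb == 0;
				 * higher levels scale the allowed imbalance by
				 * how much wider they are than imb_span.
				 */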
2460 				int factor = max(1U, (sd->span_weight / imb_span));
2461 
2462 				sd->imb_numa_nr = imb * factor;
2463 			}
2464 		}
2465 	}
2466 
2467 	/* Calculate CPU capacity for physical packages and nodes */
2468 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
2469 		if (!cpumask_test_cpu(i, cpu_map))
2470 			continue;
2471 
2472 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2473 			claim_allocations(i, sd);
2474 			init_sched_groups_capacity(i, sd);
2475 		}
2476 	}
2477 
2478 	/* Attach the domains */
2479 	rcu_read_lock();
2480 	for_each_cpu(i, cpu_map) {
2481 		rq = cpu_rq(i);
2482 		sd = *per_cpu_ptr(d.sd, i);
2483 
2484 		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
2485 		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
2486 			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
2487 
2488 		cpu_attach_domain(sd, d.rd, i);
2489 	}
2490 	rcu_read_unlock();
2491 
2492 	if (has_asym)
2493 		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
2494 
2495 	if (rq && sched_debug_verbose) {
2496 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
2497 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
2498 	}
2499 	trace_android_vh_build_sched_domains(has_asym);
2500 
2501 	ret = 0;
2502 error:
2503 	__free_domain_allocs(&d, alloc_state, cpu_map);
2504 
2505 	return ret;
2506 }
2507 
2508 /* Current sched domains: */
2509 static cpumask_var_t			*doms_cur;
2510 
2511 /* Number of sched domains in 'doms_cur': */
2512 static int				ndoms_cur;
2513 
2514 /* Attributes of custom domains in 'doms_cur' */
2515 static struct sched_domain_attr		*dattr_cur;
2516 
2517 /*
2518  * Special case: If a kmalloc() of a doms_cur partition (array of
2519  * cpumask) fails, then fall back to a single sched domain,
2520  * as determined by the single cpumask fallback_doms.
2521  */
2522 static cpumask_var_t			fallback_doms;
2523 
2524 /*
2525  * arch_update_cpu_topology lets virtualized architectures update the
2526  * CPU core maps. It is supposed to return 1 if the topology changed
2527  * or 0 if it stayed the same.
2528  */
2529 int __weak arch_update_cpu_topology(void)
2530 {
2531 	return 0;
2532 }
2533 
2534 cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2535 {
2536 	int i;
2537 	cpumask_var_t *doms;
2538 
2539 	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2540 	if (!doms)
2541 		return NULL;
2542 	for (i = 0; i < ndoms; i++) {
2543 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2544 			free_sched_domains(doms, i);
2545 			return NULL;
2546 		}
2547 	}
2548 	return doms;
2549 }
2550 
2551 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2552 {
2553 	unsigned int i;
2554 	for (i = 0; i < ndoms; i++)
2555 		free_cpumask_var(doms[i]);
2556 	kfree(doms);
2557 }
2558 
2559 /*
2560  * Set up scheduler domains and groups.  For now this just excludes isolated
2561  * CPUs, but could be used to exclude other special cases in the future.
2562  */
2563 int __init sched_init_domains(const struct cpumask *cpu_map)
2564 {
2565 	int err;
2566 
2567 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
2568 	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
2569 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
2570 
2571 	arch_update_cpu_topology();
2572 	asym_cpu_capacity_scan();
2573 	ndoms_cur = 1;
2574 	doms_cur = alloc_sched_domains(ndoms_cur);
2575 	if (!doms_cur)
2576 		doms_cur = &fallback_doms;
2577 	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_TYPE_DOMAIN));
2578 	err = build_sched_domains(doms_cur[0], NULL);
2579 
2580 	return err;
2581 }
2582 
2583 /*
2584  * Detach sched domains from a group of CPUs specified in cpu_map
2585  * These CPUs will now be attached to the NULL domain
2586  */
2587 static void detach_destroy_domains(const struct cpumask *cpu_map)
2588 {
2589 	unsigned int cpu = cpumask_any(cpu_map);
2590 	int i;
2591 
2592 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
2593 		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
2594 
2595 	rcu_read_lock();
2596 	for_each_cpu(i, cpu_map)
2597 		cpu_attach_domain(NULL, &def_root_domain, i);
2598 	rcu_read_unlock();
2599 }
2600 
2601 /* handle null as "default" */
2602 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2603 			struct sched_domain_attr *new, int idx_new)
2604 {
2605 	struct sched_domain_attr tmp;
2606 
2607 	/* Fast path: */
2608 	if (!new && !cur)
2609 		return 1;
2610 
2611 	tmp = SD_ATTR_INIT;
2612 
2613 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
2614 			new ? (new + idx_new) : &tmp,
2615 			sizeof(struct sched_domain_attr));
2616 }
2617 
2618 /*
2619  * Partition sched domains as specified by the 'ndoms_new'
2620  * cpumasks in the array doms_new[] of cpumasks. This compares
2621  * doms_new[] to the current sched domain partitioning, doms_cur[].
2622  * It destroys each deleted domain and builds each new domain.
2623  *
2624  * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2625  * The masks don't intersect (don't overlap). We set up one
2626  * sched domain for each mask. CPUs not in any of the cpumasks will
2627  * not be load balanced. If the same cpumask appears both in the
2628  * current 'doms_cur' domains and in the new 'doms_new', we can leave
2629  * it as it is.
2630  *
2631  * The passed in 'doms_new' should be allocated using
2632  * alloc_sched_domains.  This routine takes ownership of it and will
2633  * free_sched_domains it when done with it. If the caller failed the
2634  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2635  * and partition_sched_domains() will fall back to the single partition
2636  * 'fallback_doms'; this also forces the domains to be rebuilt.
2637  *
2638  * If doms_new == NULL it will be replaced with cpu_online_mask.
2639  * ndoms_new == 0 is a special case for destroying existing domains,
2640  * and it will not create the default domain.
2641  *
2642  * Call with hotplug lock and sched_domains_mutex held
2643  */
2644 void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2645 				    struct sched_domain_attr *dattr_new)
2646 {
2647 	bool __maybe_unused has_eas = false;
2648 	int i, j, n;
2649 	int new_topology;
2650 
2651 	lockdep_assert_held(&sched_domains_mutex);
2652 
2653 	/* Let the architecture update CPU core mappings: */
2654 	new_topology = arch_update_cpu_topology();
2655 	/* Trigger rebuilding CPU capacity asymmetry data */
2656 	if (new_topology)
2657 		asym_cpu_capacity_scan();
2658 
2659 	if (!doms_new) {
2660 		WARN_ON_ONCE(dattr_new);
2661 		n = 0;
2662 		doms_new = alloc_sched_domains(1);
2663 		if (doms_new) {
2664 			n = 1;
2665 			cpumask_and(doms_new[0], cpu_active_mask,
2666 				    housekeeping_cpumask(HK_TYPE_DOMAIN));
2667 		}
2668 	} else {
2669 		n = ndoms_new;
2670 	}
2671 
2672 	/* Destroy deleted domains: */
2673 	for (i = 0; i < ndoms_cur; i++) {
2674 		for (j = 0; j < n && !new_topology; j++) {
2675 			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2676 			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
2677 				struct root_domain *rd;
2678 
2679 				/*
2680 				 * This domain won't be destroyed and as such
2681 				 * its dl_bw->total_bw needs to be cleared.  It
2682 				 * will be recomputed in function
2683 				 * update_tasks_root_domain().
2684 				 */
2685 				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2686 				dl_clear_root_domain(rd);
2687 				goto match1;
2688 			}
2689 		}
2690 		/* No match - a current sched domain not in new doms_new[] */
2691 		detach_destroy_domains(doms_cur[i]);
2692 match1:
2693 		;
2694 	}
2695 
2696 	n = ndoms_cur;
2697 	if (!doms_new) {
2698 		n = 0;
2699 		doms_new = &fallback_doms;
2700 		cpumask_and(doms_new[0], cpu_active_mask,
2701 			    housekeeping_cpumask(HK_TYPE_DOMAIN));
2702 	}
2703 
2704 	/* Build new domains: */
2705 	for (i = 0; i < ndoms_new; i++) {
2706 		for (j = 0; j < n && !new_topology; j++) {
2707 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2708 			    dattrs_equal(dattr_new, i, dattr_cur, j))
2709 				goto match2;
2710 		}
2711 		/* No match - add a new doms_new */
2712 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2713 match2:
2714 		;
2715 	}
2716 
2717 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2718 	/* Build perf. domains: */
2719 	for (i = 0; i < ndoms_new; i++) {
2720 		for (j = 0; j < n && !sched_energy_update; j++) {
2721 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2722 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
2723 				has_eas = true;
2724 				goto match3;
2725 			}
2726 		}
2727 		/* No match - add perf. domains for a new rd */
2728 		has_eas |= build_perf_domains(doms_new[i]);
2729 match3:
2730 		;
2731 	}
2732 	sched_energy_set(has_eas);
2733 #endif
2734 
2735 	/* Remember the new sched domains: */
2736 	if (doms_cur != &fallback_doms)
2737 		free_sched_domains(doms_cur, ndoms_cur);
2738 
2739 	kfree(dattr_cur);
2740 	doms_cur = doms_new;
2741 	dattr_cur = dattr_new;
2742 	ndoms_cur = ndoms_new;
2743 
2744 	update_sched_domain_debugfs();
2745 }
2746 
2747 /*
2748  * Call with hotplug lock held
2749  */
2750 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2751 			     struct sched_domain_attr *dattr_new)
2752 {
2753 	mutex_lock(&sched_domains_mutex);
2754 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
2755 	mutex_unlock(&sched_domains_mutex);
2756 }
2757