1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Scheduler topology setup/handling methods
4  */
5 #include "sched.h"
6 
7 DEFINE_MUTEX(sched_domains_mutex);
8 
9 /* Protected by sched_domains_mutex: */
10 static cpumask_var_t sched_domains_tmpmask;
11 static cpumask_var_t sched_domains_tmpmask2;
12 
13 #ifdef CONFIG_SCHED_DEBUG
14 
15 static int __init sched_debug_setup(char *str)
16 {
17 	sched_debug_enabled = true;
18 
19 	return 0;
20 }
21 early_param("sched_debug", sched_debug_setup);
22 
23 static inline bool sched_debug(void)
24 {
25 	return sched_debug_enabled;
26 }
27 
28 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
29 				  struct cpumask *groupmask)
30 {
31 	struct sched_group *group = sd->groups;
32 
33 	cpumask_clear(groupmask);
34 
35 	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
36 
37 	if (!(sd->flags & SD_LOAD_BALANCE)) {
38 		printk("does not load-balance\n");
39 		if (sd->parent)
40 			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
41 		return -1;
42 	}
43 
44 	printk(KERN_CONT "span=%*pbl level=%s\n",
45 	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
46 
47 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
48 		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
49 	}
50 	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
51 		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
52 	}
53 
54 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
55 	do {
56 		if (!group) {
57 			printk("\n");
58 			printk(KERN_ERR "ERROR: group is NULL\n");
59 			break;
60 		}
61 
62 		if (!cpumask_weight(sched_group_span(group))) {
63 			printk(KERN_CONT "\n");
64 			printk(KERN_ERR "ERROR: empty group\n");
65 			break;
66 		}
67 
68 		if (!(sd->flags & SD_OVERLAP) &&
69 		    cpumask_intersects(groupmask, sched_group_span(group))) {
70 			printk(KERN_CONT "\n");
71 			printk(KERN_ERR "ERROR: repeated CPUs\n");
72 			break;
73 		}
74 
75 		cpumask_or(groupmask, groupmask, sched_group_span(group));
76 
77 		printk(KERN_CONT " %d:{ span=%*pbl",
78 				group->sgc->id,
79 				cpumask_pr_args(sched_group_span(group)));
80 
81 		if ((sd->flags & SD_OVERLAP) &&
82 		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
83 			printk(KERN_CONT " mask=%*pbl",
84 				cpumask_pr_args(group_balance_mask(group)));
85 		}
86 
87 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
88 			printk(KERN_CONT " cap=%lu", group->sgc->capacity);
89 
90 		if (group == sd->groups && sd->child &&
91 		    !cpumask_equal(sched_domain_span(sd->child),
92 				   sched_group_span(group))) {
93 			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
94 		}
95 
96 		printk(KERN_CONT " }");
97 
98 		group = group->next;
99 
100 		if (group != sd->groups)
101 			printk(KERN_CONT ",");
102 
103 	} while (group != sd->groups);
104 	printk(KERN_CONT "\n");
105 
106 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
107 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
108 
109 	if (sd->parent &&
110 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
111 		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
112 	return 0;
113 }
114 
115 static void sched_domain_debug(struct sched_domain *sd, int cpu)
116 {
117 	int level = 0;
118 
119 	if (!sched_debug_enabled)
120 		return;
121 
122 	if (!sd) {
123 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
124 		return;
125 	}
126 
127 	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
128 
129 	for (;;) {
130 		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
131 			break;
132 		level++;
133 		sd = sd->parent;
134 		if (!sd)
135 			break;
136 	}
137 }
138 #else /* !CONFIG_SCHED_DEBUG */
139 
140 # define sched_debug_enabled 0
141 # define sched_domain_debug(sd, cpu) do { } while (0)
142 static inline bool sched_debug(void)
143 {
144 	return false;
145 }
146 #endif /* CONFIG_SCHED_DEBUG */
147 
148 static int sd_degenerate(struct sched_domain *sd)
149 {
150 	if (cpumask_weight(sched_domain_span(sd)) == 1)
151 		return 1;
152 
153 	/* Following flags need at least 2 groups */
154 	if (sd->flags & (SD_LOAD_BALANCE |
155 			 SD_BALANCE_NEWIDLE |
156 			 SD_BALANCE_FORK |
157 			 SD_BALANCE_EXEC |
158 			 SD_SHARE_CPUCAPACITY |
159 			 SD_ASYM_CPUCAPACITY |
160 			 SD_SHARE_PKG_RESOURCES |
161 			 SD_SHARE_POWERDOMAIN)) {
162 		if (sd->groups != sd->groups->next)
163 			return 0;
164 	}
165 
166 	/* Following flags don't use groups */
167 	if (sd->flags & (SD_WAKE_AFFINE))
168 		return 0;
169 
170 	return 1;
171 }
172 
173 static int
174 sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
175 {
176 	unsigned long cflags = sd->flags, pflags = parent->flags;
177 
178 	if (sd_degenerate(parent))
179 		return 1;
180 
181 	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
182 		return 0;
183 
184 	/* Flags needing groups don't count if only 1 group in parent */
185 	if (parent->groups == parent->groups->next) {
186 		pflags &= ~(SD_LOAD_BALANCE |
187 				SD_BALANCE_NEWIDLE |
188 				SD_BALANCE_FORK |
189 				SD_BALANCE_EXEC |
190 				SD_ASYM_CPUCAPACITY |
191 				SD_SHARE_CPUCAPACITY |
192 				SD_SHARE_PKG_RESOURCES |
193 				SD_PREFER_SIBLING |
194 				SD_SHARE_POWERDOMAIN);
195 		if (nr_node_ids == 1)
196 			pflags &= ~SD_SERIALIZE;
197 	}
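	/*
	 * If the parent still has a flag set that the child lacks
	 * (i.e. ~cflags & pflags is non-zero), the parent adds behaviour
	 * of its own and cannot be collapsed into the child.
	 */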
198 	if (~cflags & pflags)
199 		return 0;
200 
201 	return 1;
202 }
203 
204 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
205 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
206 unsigned int sysctl_sched_energy_aware = 1;
207 DEFINE_MUTEX(sched_energy_mutex);
208 bool sched_energy_update;
209 
210 #ifdef CONFIG_PROC_SYSCTL
211 int sched_energy_aware_handler(struct ctl_table *table, int write,
212 			 void __user *buffer, size_t *lenp, loff_t *ppos)
213 {
214 	int ret, state;
215 
216 	if (write && !capable(CAP_SYS_ADMIN))
217 		return -EPERM;
218 
219 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
220 	if (!ret && write) {
221 		state = static_branch_unlikely(&sched_energy_present);
222 		if (state != sysctl_sched_energy_aware) {
223 			mutex_lock(&sched_energy_mutex);
224 			sched_energy_update = 1;
225 			rebuild_sched_domains();
226 			sched_energy_update = 0;
227 			mutex_unlock(&sched_energy_mutex);
228 		}
229 	}
230 
231 	return ret;
232 }
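/*
 * Note: this handler backs the kernel.sched_energy_aware sysctl
 * (/proc/sys/kernel/sched_energy_aware); for example, writing 0 to it
 * triggers a sched domain rebuild with Energy Aware Scheduling disabled.
 */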
233 #endif
234 
235 static void free_pd(struct perf_domain *pd)
236 {
237 	struct perf_domain *tmp;
238 
239 	while (pd) {
240 		tmp = pd->next;
241 		kfree(pd);
242 		pd = tmp;
243 	}
244 }
245 
246 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
247 {
248 	while (pd) {
249 		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
250 			return pd;
251 		pd = pd->next;
252 	}
253 
254 	return NULL;
255 }
256 
257 static struct perf_domain *pd_init(int cpu)
258 {
259 	struct em_perf_domain *obj = em_cpu_get(cpu);
260 	struct perf_domain *pd;
261 
262 	if (!obj) {
263 		if (sched_debug())
264 			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
265 		return NULL;
266 	}
267 
268 	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
269 	if (!pd)
270 		return NULL;
271 	pd->em_pd = obj;
272 
273 	return pd;
274 }
275 
276 static void perf_domain_debug(const struct cpumask *cpu_map,
277 						struct perf_domain *pd)
278 {
279 	if (!sched_debug() || !pd)
280 		return;
281 
282 	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
283 
284 	while (pd) {
285 		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
286 				cpumask_first(perf_domain_span(pd)),
287 				cpumask_pr_args(perf_domain_span(pd)),
288 				em_pd_nr_cap_states(pd->em_pd));
289 		pd = pd->next;
290 	}
291 
292 	printk(KERN_CONT "\n");
293 }
294 
295 static void destroy_perf_domain_rcu(struct rcu_head *rp)
296 {
297 	struct perf_domain *pd;
298 
299 	pd = container_of(rp, struct perf_domain, rcu);
300 	free_pd(pd);
301 }
302 
303 static void sched_energy_set(bool has_eas)
304 {
305 	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
306 		if (sched_debug())
307 			pr_info("%s: stopping EAS\n", __func__);
308 		static_branch_disable_cpuslocked(&sched_energy_present);
309 	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
310 		if (sched_debug())
311 			pr_info("%s: starting EAS\n", __func__);
312 		static_branch_enable_cpuslocked(&sched_energy_present);
313 	}
314 }
315 
316 /*
317  * EAS can be used on a root domain if it meets all the following conditions:
318  *    1. an Energy Model (EM) is available;
319  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
320  *    3. the EM complexity is low enough to keep scheduling overheads low;
321  *    4. schedutil is driving the frequency of all CPUs of the rd.
322  *
323  * The complexity of the Energy Model is defined as:
324  *
325  *              C = nr_pd * (nr_cpus + nr_cs)
326  *
327  * with parameters defined as:
328  *  - nr_pd:    the number of performance domains
329  *  - nr_cpus:  the number of CPUs
330  *  - nr_cs:    the sum of the number of capacity states of all performance
331  *              domains (for example, on a system with 2 performance domains,
332  *              with 10 capacity states each, nr_cs = 2 * 10 = 20).
333  *
334  * It is generally not a good idea to use such a model in the wake-up path on
335  * very complex platforms because of the associated scheduling overheads. The
336  * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
337  * with per-CPU DVFS and less than 8 capacity states each, for example.
338  */
339 #define EM_MAX_COMPLEXITY 2048
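/*
 * Worked example (illustrative): a 16-CPU system split into 4 performance
 * domains of 4 CPUs each, with 10 capacity states per domain, gives
 * nr_cs = 4 * 10 = 40 and C = 4 * (16 + 40) = 224, well below
 * EM_MAX_COMPLEXITY, so EAS may be enabled.
 */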
340 
341 extern struct cpufreq_governor schedutil_gov;
342 static bool build_perf_domains(const struct cpumask *cpu_map)
343 {
344 	int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
345 	struct perf_domain *pd = NULL, *tmp;
346 	int cpu = cpumask_first(cpu_map);
347 	struct root_domain *rd = cpu_rq(cpu)->rd;
348 	struct cpufreq_policy *policy;
349 	struct cpufreq_governor *gov;
350 
351 	if (!sysctl_sched_energy_aware)
352 		goto free;
353 
354 	/* EAS is enabled for asymmetric CPU capacity topologies. */
355 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
356 		if (sched_debug()) {
357 			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
358 					cpumask_pr_args(cpu_map));
359 		}
360 		goto free;
361 	}
362 
363 	for_each_cpu(i, cpu_map) {
364 		/* Skip already covered CPUs. */
365 		if (find_pd(pd, i))
366 			continue;
367 
368 		/* Do not attempt EAS if schedutil is not being used. */
369 		policy = cpufreq_cpu_get(i);
370 		if (!policy)
371 			goto free;
372 		gov = policy->governor;
373 		cpufreq_cpu_put(policy);
374 		if (gov != &schedutil_gov) {
375 			if (rd->pd)
376 				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
377 						cpumask_pr_args(cpu_map));
378 			goto free;
379 		}
380 
381 		/* Create the new pd and add it to the local list. */
382 		tmp = pd_init(i);
383 		if (!tmp)
384 			goto free;
385 		tmp->next = pd;
386 		pd = tmp;
387 
388 		/*
389 		 * Count performance domains and capacity states for the
390 		 * complexity check.
391 		 */
392 		nr_pd++;
393 		nr_cs += em_pd_nr_cap_states(pd->em_pd);
394 	}
395 
396 	/* Bail out if the Energy Model complexity is too high. */
397 	if (nr_pd * (nr_cs + nr_cpus) > EM_MAX_COMPLEXITY) {
398 		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
399 						cpumask_pr_args(cpu_map));
400 		goto free;
401 	}
402 
403 	perf_domain_debug(cpu_map, pd);
404 
405 	/* Attach the new list of performance domains to the root domain. */
406 	tmp = rd->pd;
407 	rcu_assign_pointer(rd->pd, pd);
408 	if (tmp)
409 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
410 
411 	return !!pd;
412 
413 free:
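	/*
	 * Failure path: discard the partially built list and also detach any
	 * perf domains previously attached to this root domain, leaving EAS
	 * fully disabled for it.
	 */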
414 	free_pd(pd);
415 	tmp = rd->pd;
416 	rcu_assign_pointer(rd->pd, NULL);
417 	if (tmp)
418 		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
419 
420 	return false;
421 }
422 #else
423 static void free_pd(struct perf_domain *pd) { }
424 #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/
425 
426 static void free_rootdomain(struct rcu_head *rcu)
427 {
428 	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
429 
430 	cpupri_cleanup(&rd->cpupri);
431 	cpudl_cleanup(&rd->cpudl);
432 	free_cpumask_var(rd->dlo_mask);
433 	free_cpumask_var(rd->rto_mask);
434 	free_cpumask_var(rd->online);
435 	free_cpumask_var(rd->span);
436 	free_pd(rd->pd);
437 	kfree(rd);
438 }
439 
440 void rq_attach_root(struct rq *rq, struct root_domain *rd)
441 {
442 	struct root_domain *old_rd = NULL;
443 	unsigned long flags;
444 
445 	raw_spin_lock_irqsave(&rq->lock, flags);
446 
447 	if (rq->rd) {
448 		old_rd = rq->rd;
449 
450 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
451 			set_rq_offline(rq);
452 
453 		cpumask_clear_cpu(rq->cpu, old_rd->span);
454 
455 		/*
456 		 * If we don't want to free the old_rd yet then
457 		 * set old_rd to NULL to skip the freeing later
458 		 * in this function:
459 		 */
460 		if (!atomic_dec_and_test(&old_rd->refcount))
461 			old_rd = NULL;
462 	}
463 
464 	atomic_inc(&rd->refcount);
465 	rq->rd = rd;
466 
467 	cpumask_set_cpu(rq->cpu, rd->span);
468 	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
469 		set_rq_online(rq);
470 
471 	raw_spin_unlock_irqrestore(&rq->lock, flags);
472 
473 	if (old_rd)
474 		call_rcu(&old_rd->rcu, free_rootdomain);
475 }
476 
477 void sched_get_rd(struct root_domain *rd)
478 {
479 	atomic_inc(&rd->refcount);
480 }
481 
482 void sched_put_rd(struct root_domain *rd)
483 {
484 	if (!atomic_dec_and_test(&rd->refcount))
485 		return;
486 
487 	call_rcu(&rd->rcu, free_rootdomain);
488 }
489 
490 static int init_rootdomain(struct root_domain *rd)
491 {
492 	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
493 		goto out;
494 	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
495 		goto free_span;
496 	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
497 		goto free_online;
498 	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
499 		goto free_dlo_mask;
500 
501 #ifdef HAVE_RT_PUSH_IPI
502 	rd->rto_cpu = -1;
503 	raw_spin_lock_init(&rd->rto_lock);
504 	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
505 #endif
506 
507 	init_dl_bw(&rd->dl_bw);
508 	if (cpudl_init(&rd->cpudl) != 0)
509 		goto free_rto_mask;
510 
511 	if (cpupri_init(&rd->cpupri) != 0)
512 		goto free_cpudl;
513 
514 	init_max_cpu_capacity(&rd->max_cpu_capacity);
515 
516 	return 0;
517 
518 free_cpudl:
519 	cpudl_cleanup(&rd->cpudl);
520 free_rto_mask:
521 	free_cpumask_var(rd->rto_mask);
522 free_dlo_mask:
523 	free_cpumask_var(rd->dlo_mask);
524 free_online:
525 	free_cpumask_var(rd->online);
526 free_span:
527 	free_cpumask_var(rd->span);
528 out:
529 	return -ENOMEM;
530 }
531 
532 /*
533  * By default the system creates a single root-domain with all CPUs as
534  * members (mimicking the global state we have today).
535  */
536 struct root_domain def_root_domain;
537 
538 void init_defrootdomain(void)
539 {
540 	init_rootdomain(&def_root_domain);
541 
542 	atomic_set(&def_root_domain.refcount, 1);
543 }
544 
545 static struct root_domain *alloc_rootdomain(void)
546 {
547 	struct root_domain *rd;
548 
549 	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
550 	if (!rd)
551 		return NULL;
552 
553 	if (init_rootdomain(rd) != 0) {
554 		kfree(rd);
555 		return NULL;
556 	}
557 
558 	return rd;
559 }
560 
561 static void free_sched_groups(struct sched_group *sg, int free_sgc)
562 {
563 	struct sched_group *tmp, *first;
564 
565 	if (!sg)
566 		return;
567 
568 	first = sg;
569 	do {
570 		tmp = sg->next;
571 
572 		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
573 			kfree(sg->sgc);
574 
575 		if (atomic_dec_and_test(&sg->ref))
576 			kfree(sg);
577 		sg = tmp;
578 	} while (sg != first);
579 }
580 
581 static void destroy_sched_domain(struct sched_domain *sd)
582 {
583 	/*
584 	 * A normal sched domain may have multiple group references; an
585 	 * overlapping domain, having private groups, has only one. Iterate,
586 	 * dropping group/capacity references, freeing where none remain.
587 	 */
588 	free_sched_groups(sd->groups, 1);
589 
590 	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
591 		kfree(sd->shared);
592 	kfree(sd);
593 }
594 
595 static void destroy_sched_domains_rcu(struct rcu_head *rcu)
596 {
597 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
598 
599 	while (sd) {
600 		struct sched_domain *parent = sd->parent;
601 		destroy_sched_domain(sd);
602 		sd = parent;
603 	}
604 }
605 
606 static void destroy_sched_domains(struct sched_domain *sd)
607 {
608 	if (sd)
609 		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
610 }
611 
612 /*
613  * Keep a special pointer to the highest sched_domain that has
614  * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
615  * allows us to avoid some pointer chasing in select_idle_sibling().
616  *
617  * Also keep a unique ID per domain (we use the first CPU number in
618  * the cpumask of the domain); this allows us to quickly tell if
619  * two CPUs are in the same cache domain, see cpus_share_cache().
620  */
621 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
622 DEFINE_PER_CPU(int, sd_llc_size);
623 DEFINE_PER_CPU(int, sd_llc_id);
624 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
625 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
626 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
627 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
628 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
629 
630 static void update_top_cache_domain(int cpu)
631 {
632 	struct sched_domain_shared *sds = NULL;
633 	struct sched_domain *sd;
634 	int id = cpu;
635 	int size = 1;
636 
637 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
638 	if (sd) {
639 		id = cpumask_first(sched_domain_span(sd));
640 		size = cpumask_weight(sched_domain_span(sd));
641 		sds = sd->shared;
642 	}
643 
644 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
645 	per_cpu(sd_llc_size, cpu) = size;
646 	per_cpu(sd_llc_id, cpu) = id;
647 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
648 
649 	sd = lowest_flag_domain(cpu, SD_NUMA);
650 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
651 
652 	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
653 	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
654 
655 	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
656 	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
657 }
658 
659 /*
660  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
661  * hold the hotplug lock.
662  */
663 static void
664 cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
665 {
666 	struct rq *rq = cpu_rq(cpu);
667 	struct sched_domain *tmp;
668 
669 	/* Remove the sched domains which do not contribute to scheduling. */
670 	for (tmp = sd; tmp; ) {
671 		struct sched_domain *parent = tmp->parent;
672 		if (!parent)
673 			break;
674 
675 		if (sd_parent_degenerate(tmp, parent)) {
676 			tmp->parent = parent->parent;
677 			if (parent->parent)
678 				parent->parent->child = tmp;
679 			/*
680 			 * Transfer SD_PREFER_SIBLING down in case of a
681 			 * degenerate parent; the spans match for this
682 			 * so the property transfers.
683 			 */
684 			if (parent->flags & SD_PREFER_SIBLING)
685 				tmp->flags |= SD_PREFER_SIBLING;
686 			destroy_sched_domain(parent);
687 		} else
688 			tmp = tmp->parent;
689 	}
690 
691 	if (sd && sd_degenerate(sd)) {
692 		tmp = sd;
693 		sd = sd->parent;
694 		destroy_sched_domain(tmp);
695 		if (sd)
696 			sd->child = NULL;
697 	}
698 
699 	sched_domain_debug(sd, cpu);
700 
701 	rq_attach_root(rq, rd);
702 	tmp = rq->sd;
703 	rcu_assign_pointer(rq->sd, sd);
704 	dirty_sched_domain_sysctl(cpu);
705 	destroy_sched_domains(tmp);
706 
707 	update_top_cache_domain(cpu);
708 }
709 
710 struct s_data {
711 	struct sched_domain * __percpu *sd;
712 	struct root_domain	*rd;
713 };
714 
715 enum s_alloc {
716 	sa_rootdomain,
717 	sa_sd,
718 	sa_sd_storage,
719 	sa_none,
720 };
721 
722 /*
723  * Return the canonical balance CPU for this group; this is the first CPU
724  * of this group that's also in the balance mask.
725  *
726  * The balance mask contains all those CPUs that could actually end up at this
727  * group. See build_balance_mask().
728  *
729  * Also see should_we_balance().
730  */
731 int group_balance_cpu(struct sched_group *sg)
732 {
733 	return cpumask_first(group_balance_mask(sg));
734 }
735 
736 
737 /*
738  * NUMA topology (first read the regular topology blurb below)
739  *
740  * Given a node-distance table, for example:
741  *
742  *   node   0   1   2   3
743  *     0:  10  20  30  20
744  *     1:  20  10  20  30
745  *     2:  30  20  10  20
746  *     3:  20  30  20  10
747  *
748  * which represents a 4 node ring topology like:
749  *
750  *   0 ----- 1
751  *   |       |
752  *   |       |
753  *   |       |
754  *   3 ----- 2
755  *
756  * We want to construct domains and groups to represent this. The way we go
757  * about doing this is to build the domains on 'hops'. For each NUMA level we
758  * construct the mask of all nodes reachable in @level hops.
759  *
760  * For the above NUMA topology that gives 3 levels:
761  *
762  * NUMA-2	0-3		0-3		0-3		0-3
763  *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
764  *
765  * NUMA-1	0-1,3		0-2		1-3		0,2-3
766  *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
767  *
768  * NUMA-0	0		1		2		3
769  *
770  *
771  * As can be seen, things don't nicely line up as with the regular topology.
772  * When we iterate a domain in child domain chunks some nodes can be
773  * represented multiple times -- hence the "overlap" naming for this part of
774  * the topology.
775  *
776  * In order to minimize this overlap, we only build enough groups to cover the
777  * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
778  *
779  * Because:
780  *
781  *  - the first group of each domain is its child domain; this
782  *    gets us the first 0-1,3
783  *  - the only uncovered node is 2, whose child domain is 1-3.
784  *
785  * However, because of the overlap, computing a unique CPU for each group is
786  * more complicated. Consider for instance the groups of NODE-1 NUMA-2, both
787  * groups include the CPUs of Node-0, while those CPUs would not in fact ever
788  * end up at those groups (they would end up in group: 0-1,3).
789  *
790  * To correct this we have to introduce the group balance mask. This mask
791  * will contain those CPUs in the group that can reach this group given the
792  * (child) domain tree.
793  *
794  * With this we can once again compute balance_cpu and sched_group_capacity
795  * relations.
796  *
797  * XXX include words on how balance_cpu is unique and therefore can be
798  * used for sched_group_capacity links.
799  *
800  *
801  * Another 'interesting' topology is:
802  *
803  *   node   0   1   2   3
804  *     0:  10  20  20  30
805  *     1:  20  10  20  20
806  *     2:  20  20  10  20
807  *     3:  30  20  20  10
808  *
809  * Which looks a little like:
810  *
811  *   0 ----- 1
812  *   |     / |
813  *   |   /   |
814  *   | /     |
815  *   2 ----- 3
816  *
817  * This topology is asymmetric, nodes 1,2 are fully connected, but nodes 0,3
818  * are not.
819  *
820  * This leads to a few particularly weird cases where the number of
821  * sched_domains is not the same for each CPU. Consider:
822  *
823  * NUMA-2	0-3						0-3
824  *  groups:	{0-2},{1-3}					{1-3},{0-2}
825  *
826  * NUMA-1	0-2		0-3		0-3		1-3
827  *
828  * NUMA-0	0		1		2		3
829  *
830  */
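/*
 * Illustration (derived from the ring example above): for node 0's NUMA-2
 * domain, the group spanning 1-3 is built from node 2's child (NUMA-1)
 * domain. Only node 2's CPUs have a child domain whose span is exactly 1-3,
 * so that group's balance mask ends up containing just node 2's CPUs.
 */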
831 
832 
833 /*
834  * Build the balance mask; it contains only those CPUs that can arrive at this
835  * group and should be considered to continue balancing.
836  *
837  * We do this during the group creation pass; therefore the group information
838  * isn't complete yet. However, since each group represents a (child) domain we
839  * can fully construct this using the sched_domain bits (which are already
840  * complete).
841  */
842 static void
843 build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
844 {
845 	const struct cpumask *sg_span = sched_group_span(sg);
846 	struct sd_data *sdd = sd->private;
847 	struct sched_domain *sibling;
848 	int i;
849 
850 	cpumask_clear(mask);
851 
852 	for_each_cpu(i, sg_span) {
853 		sibling = *per_cpu_ptr(sdd->sd, i);
854 
855 		/*
856 		 * Can happen in the asymmetric case, where these siblings are
857 		 * unused. The mask will not be empty because those CPUs that
858 		 * do have the top domain _should_ span the domain.
859 		 */
860 		if (!sibling->child)
861 			continue;
862 
863 		/* If we would not end up here, we can't continue from here */
864 		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
865 			continue;
866 
867 		cpumask_set_cpu(i, mask);
868 	}
869 
870 	/* We must not have empty masks here */
871 	WARN_ON_ONCE(cpumask_empty(mask));
872 }
873 
874 /*
875  * XXX: This creates per-node group entries; since the load-balancer will
876  * immediately access remote memory to construct this group's load-balance
877  * statistics, having the groups node local is of dubious benefit.
878  */
879 static struct sched_group *
880 build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
881 {
882 	struct sched_group *sg;
883 	struct cpumask *sg_span;
884 
885 	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
886 			GFP_KERNEL, cpu_to_node(cpu));
887 
888 	if (!sg)
889 		return NULL;
890 
891 	sg_span = sched_group_span(sg);
892 	if (sd->child)
893 		cpumask_copy(sg_span, sched_domain_span(sd->child));
894 	else
895 		cpumask_copy(sg_span, sched_domain_span(sd));
896 
897 	atomic_inc(&sg->ref);
898 	return sg;
899 }
900 
901 static void init_overlap_sched_group(struct sched_domain *sd,
902 				     struct sched_group *sg)
903 {
904 	struct cpumask *mask = sched_domains_tmpmask2;
905 	struct sd_data *sdd = sd->private;
906 	struct cpumask *sg_span;
907 	int cpu;
908 
909 	build_balance_mask(sd, sg, mask);
910 	cpu = cpumask_first_and(sched_group_span(sg), mask);
911 
912 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
913 	if (atomic_inc_return(&sg->sgc->ref) == 1)
914 		cpumask_copy(group_balance_mask(sg), mask);
915 	else
916 		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
917 
918 	/*
919 	 * Initialize sgc->capacity such that even if we mess up the
920 	 * domains and no possible iteration will get us here, we won't
921 	 * die on a /0 trap.
922 	 */
923 	sg_span = sched_group_span(sg);
924 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
925 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
926 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
927 }
928 
929 static int
930 build_overlap_sched_groups(struct sched_domain *sd, int cpu)
931 {
932 	struct sched_group *first = NULL, *last = NULL, *sg;
933 	const struct cpumask *span = sched_domain_span(sd);
934 	struct cpumask *covered = sched_domains_tmpmask;
935 	struct sd_data *sdd = sd->private;
936 	struct sched_domain *sibling;
937 	int i;
938 
939 	cpumask_clear(covered);
940 
941 	for_each_cpu_wrap(i, span, cpu) {
942 		struct cpumask *sg_span;
943 
944 		if (cpumask_test_cpu(i, covered))
945 			continue;
946 
947 		sibling = *per_cpu_ptr(sdd->sd, i);
948 
949 		/*
950 		 * Asymmetric node setups can result in situations where the
951 		 * domain tree is of unequal depth; make sure to skip domains
952 		 * that already cover the entire range.
953 		 *
954 		 * In that case build_sched_domains() will have terminated the
955 		 * iteration early and our sibling sd spans will be empty.
956 		 * Domains should always include the CPU they're built on, so
957 		 * check that.
958 		 */
959 		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
960 			continue;
961 
962 		sg = build_group_from_child_sched_domain(sibling, cpu);
963 		if (!sg)
964 			goto fail;
965 
966 		sg_span = sched_group_span(sg);
967 		cpumask_or(covered, covered, sg_span);
968 
969 		init_overlap_sched_group(sd, sg);
970 
971 		if (!first)
972 			first = sg;
973 		if (last)
974 			last->next = sg;
975 		last = sg;
976 		last->next = first;
977 	}
978 	sd->groups = first;
979 
980 	return 0;
981 
982 fail:
983 	free_sched_groups(first, 0);
984 
985 	return -ENOMEM;
986 }
987 
988 
989 /*
990  * Package topology (also see the load-balance blurb in fair.c)
991  *
992  * The scheduler builds a tree structure to represent a number of important
993  * topology features. By default (default_topology[]) these include:
994  *
995  *  - Simultaneous multithreading (SMT)
996  *  - Multi-Core Cache (MC)
997  *  - Package (DIE)
998  *
999  * Where the last one more or less denotes everything up to a NUMA node.
1000  *
1001  * The tree consists of 3 primary data structures:
1002  *
1003  *	sched_domain -> sched_group -> sched_group_capacity
1004  *	    ^ ^             ^ ^
1005  *          `-'             `-'
1006  *
1007  * The sched_domains are per-CPU and have a two way link (parent & child) and
1008  * denote the ever growing mask of CPUs belonging to that level of topology.
1009  *
1010  * Each sched_domain has a circular (double) linked list of sched_groups, each
1011  * denoting the domains of the level below (or individual CPUs in case of the
1012  * first domain level). The sched_group linked by a sched_domain includes the
1013  * CPU of that sched_domain [*].
1014  *
1015  * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
1016  *
1017  * CPU   0   1   2   3   4   5   6   7
1018  *
1019  * DIE  [                             ]
1020  * MC   [             ] [             ]
1021  * SMT  [     ] [     ] [     ] [     ]
1022  *
1023  *  - or -
1024  *
1025  * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
1026  * MC	0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
1027  * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
1028  *
1029  * CPU   0   1   2   3   4   5   6   7
1030  *
1031  * One way to think about it is: sched_domain moves you up and down among these
1032  * topology levels, while sched_group moves you sideways through it, at child
1033  * domain granularity.
1034  *
1035  * sched_group_capacity ensures each unique sched_group has shared storage.
1036  *
1037  * There are two related construction problems, both of which require a CPU
1038  * that uniquely identifies each group (for a given domain):
1039  *
1040  *  - The first is the balance_cpu (see should_we_balance() and the
1041  *    load-balance blurb in fair.c); for each group we only want 1 CPU to
1042  *    continue balancing at a higher domain.
1043  *
1044  *  - The second is the sched_group_capacity; we want all identical groups
1045  *    to share a single sched_group_capacity.
1046  *
1047  * These topologies are exclusive by construction: it's impossible for an
1048  * SMT thread to belong to multiple cores, and for cores to be part of
1049  * multiple caches. There is a very clear and unique location
1050  * for each CPU in the hierarchy.
1051  *
1052  * Therefore computing a unique CPU for each group is trivial (the iteration
1053  * mask is redundant and set to all 1s; all CPUs in a group will end up at _that_
1054  * group), so we can simply pick the first CPU in each group.
1055  *
1056  *
1057  * [*] in other words, the first group of each domain is its child domain.
1058  */
1059 
1060 static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1061 {
1062 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1063 	struct sched_domain *child = sd->child;
1064 	struct sched_group *sg;
1065 	bool already_visited;
1066 
1067 	if (child)
1068 		cpu = cpumask_first(sched_domain_span(child));
1069 
1070 	sg = *per_cpu_ptr(sdd->sg, cpu);
1071 	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1072 
1073 	/* Increase refcounts for claim_allocations: */
1074 	already_visited = atomic_inc_return(&sg->ref) > 1;
1075 	/* sgc visits should follow a similar trend as sg */
1076 	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));
1077 
1078 	/* If we have already visited that group, it's already initialized. */
1079 	if (already_visited)
1080 		return sg;
1081 
1082 	if (child) {
1083 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
1084 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
1085 	} else {
1086 		cpumask_set_cpu(cpu, sched_group_span(sg));
1087 		cpumask_set_cpu(cpu, group_balance_mask(sg));
1088 	}
1089 
1090 	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
1091 	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
1092 	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
1093 
1094 	return sg;
1095 }
1096 
1097 /*
1098  * build_sched_groups will build a circular linked list of the groups
1099  * covered by the given span, will set each group's ->cpumask correctly,
1100  * and will initialize their ->sgc.
1101  *
1102  * Assumes the sched_domain tree is fully constructed
1103  */
1104 static int
1105 build_sched_groups(struct sched_domain *sd, int cpu)
1106 {
1107 	struct sched_group *first = NULL, *last = NULL;
1108 	struct sd_data *sdd = sd->private;
1109 	const struct cpumask *span = sched_domain_span(sd);
1110 	struct cpumask *covered;
1111 	int i;
1112 
1113 	lockdep_assert_held(&sched_domains_mutex);
1114 	covered = sched_domains_tmpmask;
1115 
1116 	cpumask_clear(covered);
1117 
1118 	for_each_cpu_wrap(i, span, cpu) {
1119 		struct sched_group *sg;
1120 
1121 		if (cpumask_test_cpu(i, covered))
1122 			continue;
1123 
1124 		sg = get_group(i, sdd);
1125 
1126 		cpumask_or(covered, covered, sched_group_span(sg));
1127 
1128 		if (!first)
1129 			first = sg;
1130 		if (last)
1131 			last->next = sg;
1132 		last = sg;
1133 	}
1134 	last->next = first;
1135 	sd->groups = first;
1136 
1137 	return 0;
1138 }
1139 
1140 /*
1141  * Initialize sched groups cpu_capacity.
1142  *
1143  * cpu_capacity indicates the capacity of a sched group, which is used while
1144  * distributing the load between different sched groups in a sched domain.
1145  * Typically cpu_capacity for all the groups in a sched domain will be the same
1146  * unless there are asymmetries in the topology. If there are asymmetries, the
1147  * group having more cpu_capacity will pick up more load compared to the
1148  * group having less cpu_capacity.
1149  */
1150 static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1151 {
1152 	struct sched_group *sg = sd->groups;
1153 
1154 	WARN_ON(!sg);
1155 
1156 	do {
1157 		int cpu, max_cpu = -1;
1158 
1159 		sg->group_weight = cpumask_weight(sched_group_span(sg));
1160 
1161 		if (!(sd->flags & SD_ASYM_PACKING))
1162 			goto next;
1163 
1164 		for_each_cpu(cpu, sched_group_span(sg)) {
1165 			if (max_cpu < 0)
1166 				max_cpu = cpu;
1167 			else if (sched_asym_prefer(cpu, max_cpu))
1168 				max_cpu = cpu;
1169 		}
1170 		sg->asym_prefer_cpu = max_cpu;
1171 
1172 next:
1173 		sg = sg->next;
1174 	} while (sg != sd->groups);
1175 
1176 	if (cpu != group_balance_cpu(sg))
1177 		return;
1178 
1179 	update_group_capacity(sd, cpu);
1180 }
1181 
1182 /*
1183  * Initializers for schedule domains
1184  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
1185  */
1186 
1187 static int default_relax_domain_level = -1;
1188 int sched_domain_level_max;
1189 
1190 static int __init setup_relax_domain_level(char *str)
1191 {
1192 	if (kstrtoint(str, 0, &default_relax_domain_level))
1193 		pr_warn("Unable to set relax_domain_level\n");
1194 
1195 	return 1;
1196 }
1197 __setup("relax_domain_level=", setup_relax_domain_level);
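/*
 * For example, booting with "relax_domain_level=1" turns wake/newidle
 * balancing on only for domains at level <= 1 (typically SMT and MC) and
 * turns it off for higher levels; see set_domain_attribute() below.
 */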
1198 
1199 static void set_domain_attribute(struct sched_domain *sd,
1200 				 struct sched_domain_attr *attr)
1201 {
1202 	int request;
1203 
1204 	if (!attr || attr->relax_domain_level < 0) {
1205 		if (default_relax_domain_level < 0)
1206 			return;
1207 		else
1208 			request = default_relax_domain_level;
1209 	} else
1210 		request = attr->relax_domain_level;
1211 	if (request < sd->level) {
1212 		/* Turn off idle balance on this domain: */
1213 		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1214 	} else {
1215 		/* Turn on idle balance on this domain: */
1216 		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1217 	}
1218 }
1219 
1220 static void __sdt_free(const struct cpumask *cpu_map);
1221 static int __sdt_alloc(const struct cpumask *cpu_map);
1222 
1223 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
1224 				 const struct cpumask *cpu_map)
1225 {
1226 	switch (what) {
1227 	case sa_rootdomain:
1228 		if (!atomic_read(&d->rd->refcount))
1229 			free_rootdomain(&d->rd->rcu);
1230 		/* Fall through */
1231 	case sa_sd:
1232 		free_percpu(d->sd);
1233 		/* Fall through */
1234 	case sa_sd_storage:
1235 		__sdt_free(cpu_map);
1236 		/* Fall through */
1237 	case sa_none:
1238 		break;
1239 	}
1240 }
1241 
1242 static enum s_alloc
1243 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
1244 {
1245 	memset(d, 0, sizeof(*d));
1246 
1247 	if (__sdt_alloc(cpu_map))
1248 		return sa_sd_storage;
1249 	d->sd = alloc_percpu(struct sched_domain *);
1250 	if (!d->sd)
1251 		return sa_sd_storage;
1252 	d->rd = alloc_rootdomain();
1253 	if (!d->rd)
1254 		return sa_sd;
1255 
1256 	return sa_rootdomain;
1257 }
1258 
1259 /*
1260  * NULL the sd_data elements we've used to build the sched_domain and
1261  * sched_group structure so that the subsequent __free_domain_allocs()
1262  * will not free the data we're using.
1263  */
1264 static void claim_allocations(int cpu, struct sched_domain *sd)
1265 {
1266 	struct sd_data *sdd = sd->private;
1267 
1268 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1269 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
1270 
1271 	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
1272 		*per_cpu_ptr(sdd->sds, cpu) = NULL;
1273 
1274 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
1275 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
1276 
1277 	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
1278 		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
1279 }
1280 
1281 #ifdef CONFIG_NUMA
1282 enum numa_topology_type sched_numa_topology_type;
1283 
1284 static int			sched_domains_numa_levels;
1285 static int			sched_domains_curr_level;
1286 
1287 int				sched_max_numa_distance;
1288 static int			*sched_domains_numa_distance;
1289 static struct cpumask		***sched_domains_numa_masks;
1290 int __read_mostly		node_reclaim_distance = RECLAIM_DISTANCE;
1291 #endif
1292 
1293 /*
1294  * SD_flags allowed in topology descriptions.
1295  *
1296  * These flags are purely descriptive of the topology and do not prescribe
1297  * behaviour. Behaviour is artificial and mapped in the below sd_init()
1298  * function:
1299  *
1300  *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
1301  *   SD_SHARE_PKG_RESOURCES - describes shared caches
1302  *   SD_NUMA                - describes NUMA topologies
1303  *   SD_SHARE_POWERDOMAIN   - describes shared power domain
1304  *
1305  * The odd one out, which besides describing the topology also
1306  * prescribes the desired behaviour that goes along with it:
1307  *
1308  *   SD_ASYM_PACKING        - describes SMT quirks
1309  */
1310 #define TOPOLOGY_SD_FLAGS		\
1311 	(SD_SHARE_CPUCAPACITY	|	\
1312 	 SD_SHARE_PKG_RESOURCES |	\
1313 	 SD_NUMA		|	\
1314 	 SD_ASYM_PACKING	|	\
1315 	 SD_SHARE_POWERDOMAIN)
1316 
1317 static struct sched_domain *
1318 sd_init(struct sched_domain_topology_level *tl,
1319 	const struct cpumask *cpu_map,
1320 	struct sched_domain *child, int dflags, int cpu)
1321 {
1322 	struct sd_data *sdd = &tl->data;
1323 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1324 	int sd_id, sd_weight, sd_flags = 0;
1325 
1326 #ifdef CONFIG_NUMA
1327 	/*
1328 	 * Ugly hack to pass state to sd_numa_mask()...
1329 	 */
1330 	sched_domains_curr_level = tl->numa_level;
1331 #endif
1332 
1333 	sd_weight = cpumask_weight(tl->mask(cpu));
1334 
1335 	if (tl->sd_flags)
1336 		sd_flags = (*tl->sd_flags)();
1337 	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1338 			"wrong sd_flags in topology description\n"))
1339 		sd_flags &= TOPOLOGY_SD_FLAGS;
1340 
1341 	/* Apply detected topology flags */
1342 	sd_flags |= dflags;
1343 
1344 	*sd = (struct sched_domain){
1345 		.min_interval		= sd_weight,
1346 		.max_interval		= 2*sd_weight,
1347 		.busy_factor		= 32,
1348 		.imbalance_pct		= 125,
1349 
1350 		.cache_nice_tries	= 0,
1351 
1352 		.flags			= 1*SD_LOAD_BALANCE
1353 					| 1*SD_BALANCE_NEWIDLE
1354 					| 1*SD_BALANCE_EXEC
1355 					| 1*SD_BALANCE_FORK
1356 					| 0*SD_BALANCE_WAKE
1357 					| 1*SD_WAKE_AFFINE
1358 					| 0*SD_SHARE_CPUCAPACITY
1359 					| 0*SD_SHARE_PKG_RESOURCES
1360 					| 0*SD_SERIALIZE
1361 					| 1*SD_PREFER_SIBLING
1362 					| 0*SD_NUMA
1363 					| sd_flags
1364 					,
1365 
1366 		.last_balance		= jiffies,
1367 		.balance_interval	= sd_weight,
1368 		.max_newidle_lb_cost	= 0,
1369 		.next_decay_max_lb_cost	= jiffies,
1370 		.child			= child,
1371 #ifdef CONFIG_SCHED_DEBUG
1372 		.name			= tl->name,
1373 #endif
1374 	};
1375 
1376 	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
1377 	sd_id = cpumask_first(sched_domain_span(sd));
1378 
1379 	/*
1380 	 * Convert topological properties into behaviour.
1381 	 */
1382 
1383 	/* Don't attempt to spread across CPUs of different capacities. */
1384 	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
1385 		sd->child->flags &= ~SD_PREFER_SIBLING;
1386 
1387 	if (sd->flags & SD_SHARE_CPUCAPACITY) {
1388 		sd->imbalance_pct = 110;
1389 
1390 	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1391 		sd->imbalance_pct = 117;
1392 		sd->cache_nice_tries = 1;
1393 
1394 #ifdef CONFIG_NUMA
1395 	} else if (sd->flags & SD_NUMA) {
1396 		sd->cache_nice_tries = 2;
1397 
1398 		sd->flags &= ~SD_PREFER_SIBLING;
1399 		sd->flags |= SD_SERIALIZE;
1400 		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
1401 			sd->flags &= ~(SD_BALANCE_EXEC |
1402 				       SD_BALANCE_FORK |
1403 				       SD_WAKE_AFFINE);
1404 		}
1405 
1406 #endif
1407 	} else {
1408 		sd->cache_nice_tries = 1;
1409 	}
1410 
1411 	/*
1412 	 * For all levels sharing cache; connect a sched_domain_shared
1413 	 * instance.
1414 	 */
1415 	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1416 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1417 		atomic_inc(&sd->shared->ref);
1418 		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1419 	}
1420 
1421 	sd->private = sdd;
1422 
1423 	return sd;
1424 }
1425 
1426 /*
1427  * Topology list, bottom-up.
1428  */
1429 static struct sched_domain_topology_level default_topology[] = {
1430 #ifdef CONFIG_SCHED_SMT
1431 	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1432 #endif
1433 #ifdef CONFIG_SCHED_MC
1434 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1435 #endif
1436 	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
1437 	{ NULL, },
1438 };
1439 
1440 static struct sched_domain_topology_level *sched_domain_topology =
1441 	default_topology;
1442 
1443 #define for_each_sd_topology(tl)			\
1444 	for (tl = sched_domain_topology; tl->mask; tl++)
1445 
1446 void set_sched_topology(struct sched_domain_topology_level *tl)
1447 {
1448 	if (WARN_ON_ONCE(sched_smp_initialized))
1449 		return;
1450 
1451 	sched_domain_topology = tl;
1452 }
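/*
 * Example (illustrative only): an architecture that wants to drop the SMT
 * level could install its own table early in boot, before SMP is up:
 *
 *	static struct sched_domain_topology_level arch_topology[] = {
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(arch_topology);
 */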
1453 
1454 #ifdef CONFIG_NUMA
1455 
1456 static const struct cpumask *sd_numa_mask(int cpu)
1457 {
1458 	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1459 }
1460 
1461 static void sched_numa_warn(const char *str)
1462 {
1463 	static int done = false;
1464 	int i,j;
1465 
1466 	if (done)
1467 		return;
1468 
1469 	done = true;
1470 
1471 	printk(KERN_WARNING "ERROR: %s\n\n", str);
1472 
1473 	for (i = 0; i < nr_node_ids; i++) {
1474 		printk(KERN_WARNING "  ");
1475 		for (j = 0; j < nr_node_ids; j++)
1476 			printk(KERN_CONT "%02d ", node_distance(i,j));
1477 		printk(KERN_CONT "\n");
1478 	}
1479 	printk(KERN_WARNING "\n");
1480 }
1481 
1482 bool find_numa_distance(int distance)
1483 {
1484 	int i;
1485 
1486 	if (distance == node_distance(0, 0))
1487 		return true;
1488 
1489 	for (i = 0; i < sched_domains_numa_levels; i++) {
1490 		if (sched_domains_numa_distance[i] == distance)
1491 			return true;
1492 	}
1493 
1494 	return false;
1495 }
1496 
1497 /*
1498  * A system can have three types of NUMA topology:
1499  * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
1500  * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
1501  * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
1502  *
1503  * The difference between a glueless mesh topology and a backplane
1504  * topology lies in whether communication between not directly
1505  * connected nodes goes through intermediary nodes (where programs
1506  * could run), or through backplane controllers. This affects
1507  * placement of programs.
1508  *
1509  * The type of topology can be discerned with the following tests:
1510  * - If the maximum distance between any nodes is 1 hop, the system
1511  *   is directly connected.
1512  * - If for two nodes A and B, located N > 1 hops away from each other,
1513  *   there is an intermediary node C, which is < N hops away from both
1514  *   nodes A and B, the system is a glueless mesh.
1515  */
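/*
 * For example, the 4-node ring topology described earlier has three distance
 * levels (10/20/30), and the two nodes that are 30 apart always have an
 * intermediary node 20 away from both, so it is classified as a glueless mesh.
 */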
1516 static void init_numa_topology_type(void)
1517 {
1518 	int a, b, c, n;
1519 
1520 	n = sched_max_numa_distance;
1521 
1522 	if (sched_domains_numa_levels <= 2) {
1523 		sched_numa_topology_type = NUMA_DIRECT;
1524 		return;
1525 	}
1526 
1527 	for_each_online_node(a) {
1528 		for_each_online_node(b) {
1529 			/* Find two nodes furthest removed from each other. */
1530 			if (node_distance(a, b) < n)
1531 				continue;
1532 
1533 			/* Is there an intermediary node between a and b? */
1534 			for_each_online_node(c) {
1535 				if (node_distance(a, c) < n &&
1536 				    node_distance(b, c) < n) {
1537 					sched_numa_topology_type =
1538 							NUMA_GLUELESS_MESH;
1539 					return;
1540 				}
1541 			}
1542 
1543 			sched_numa_topology_type = NUMA_BACKPLANE;
1544 			return;
1545 		}
1546 	}
1547 }
1548 
1549 
1550 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
1551 
1552 void sched_init_numa(void)
1553 {
1554 	struct sched_domain_topology_level *tl;
1555 	unsigned long *distance_map;
1556 	int nr_levels = 0;
1557 	int i, j;
1558 
1559 	/*
1560 	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
1561 	 * unique distances in the node_distance() table.
1562 	 */
1563 	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
1564 	if (!distance_map)
1565 		return;
1566 
1567 	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
1568 	for (i = 0; i < nr_node_ids; i++) {
1569 		for (j = 0; j < nr_node_ids; j++) {
1570 			int distance = node_distance(i, j);
1571 
1572 			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
1573 				sched_numa_warn("Invalid distance value range");
1574 				return;
1575 			}
1576 
1577 			bitmap_set(distance_map, distance, 1);
1578 		}
1579 	}
1580 	/*
1581 	 * We can now figure out how many unique distance values there are and
1582 	 * allocate memory accordingly.
1583 	 */
1584 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
1585 
1586 	sched_domains_numa_distance = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
1587 	if (!sched_domains_numa_distance) {
1588 		bitmap_free(distance_map);
1589 		return;
1590 	}
1591 
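	/*
	 * Walk the distance bitmap from low to high: find_next_bit() lands on
	 * each unique distance in ascending order, and the extra j++ in the
	 * loop header steps past it before the next search.
	 */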
1592 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
1593 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
1594 		sched_domains_numa_distance[i] = j;
1595 	}
1596 
1597 	bitmap_free(distance_map);
1598 
1599 	/*
1600 	 * 'nr_levels' contains the number of unique distances
1601 	 *
1602 	 * The sched_domains_numa_distance[] array includes the actual distance
1603 	 * numbers.
1604 	 */
1605 
1606 	/*
1607 	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
1608 	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
1609 	 * the array will contain fewer than 'nr_levels' members. This could be
1610 	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
1611 	 * in other functions.
1612 	 *
1613 	 * We reset it to 'nr_levels' at the end of this function.
1614 	 */
1615 	sched_domains_numa_levels = 0;
1616 
1617 	sched_domains_numa_masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
1618 	if (!sched_domains_numa_masks)
1619 		return;
1620 
1621 	/*
1622 	 * Now for each level, construct a mask per node which contains all
1623 	 * CPUs of nodes that are that many hops away from us.
1624 	 */
1625 	for (i = 0; i < nr_levels; i++) {
1626 		sched_domains_numa_masks[i] =
1627 			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
1628 		if (!sched_domains_numa_masks[i])
1629 			return;
1630 
1631 		for (j = 0; j < nr_node_ids; j++) {
1632 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1633 			int k;
1634 
1635 			if (!mask)
1636 				return;
1637 
1638 			sched_domains_numa_masks[i][j] = mask;
1639 
1640 			for_each_node(k) {
1641 				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
1642 					sched_numa_warn("Node-distance not symmetric");
1643 
1644 				if (node_distance(j, k) > sched_domains_numa_distance[i])
1645 					continue;
1646 
1647 				cpumask_or(mask, mask, cpumask_of_node(k));
1648 			}
1649 		}
1650 	}
1651 
1652 	/* Compute default topology size */
1653 	for (i = 0; sched_domain_topology[i].mask; i++);
1654 
1655 	tl = kzalloc((i + nr_levels + 1) *
1656 			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
1657 	if (!tl)
1658 		return;
1659 
1660 	/*
1661 	 * Copy the default topology bits..
1662 	 */
1663 	for (i = 0; sched_domain_topology[i].mask; i++)
1664 		tl[i] = sched_domain_topology[i];
1665 
1666 	/*
1667 	 * Add the NUMA identity distance, aka single NODE.
1668 	 */
1669 	tl[i++] = (struct sched_domain_topology_level){
1670 		.mask = sd_numa_mask,
1671 		.numa_level = 0,
1672 		SD_INIT_NAME(NODE)
1673 	};
1674 
1675 	/*
1676 	 * .. and append 'j' levels of NUMA goodness.
1677 	 */
1678 	for (j = 1; j < nr_levels; i++, j++) {
1679 		tl[i] = (struct sched_domain_topology_level){
1680 			.mask = sd_numa_mask,
1681 			.sd_flags = cpu_numa_flags,
1682 			.flags = SDTL_OVERLAP,
1683 			.numa_level = j,
1684 			SD_INIT_NAME(NUMA)
1685 		};
1686 	}
1687 
1688 	sched_domain_topology = tl;
1689 
1690 	sched_domains_numa_levels = nr_levels;
1691 	sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
1692 
1693 	init_numa_topology_type();
1694 }
1695 
1696 void sched_domains_numa_masks_set(unsigned int cpu)
1697 {
1698 	int node = cpu_to_node(cpu);
1699 	int i, j;
1700 
1701 	for (i = 0; i < sched_domains_numa_levels; i++) {
1702 		for (j = 0; j < nr_node_ids; j++) {
1703 			if (node_distance(j, node) <= sched_domains_numa_distance[i])
1704 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
1705 		}
1706 	}
1707 }
1708 
1709 void sched_domains_numa_masks_clear(unsigned int cpu)
1710 {
1711 	int i, j;
1712 
1713 	for (i = 0; i < sched_domains_numa_levels; i++) {
1714 		for (j = 0; j < nr_node_ids; j++)
1715 			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
1716 	}
1717 }
1718 
1719 /*
1720  * sched_numa_find_closest() - given the NUMA topology, find the cpu
1721  *                             closest to @cpu from @cpus.
1722  * cpus: cpumask to find a cpu from
1723  * cpu: cpu to be close to
1724  *
1725  * returns: cpu, or nr_cpu_ids when nothing found.
1726  */
1727 int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1728 {
1729 	int i, j = cpu_to_node(cpu);
1730 
1731 	for (i = 0; i < sched_domains_numa_levels; i++) {
1732 		cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
1733 		if (cpu < nr_cpu_ids)
1734 			return cpu;
1735 	}
1736 	return nr_cpu_ids;
1737 }
1738 
1739 #endif /* CONFIG_NUMA */
1740 
1741 static int __sdt_alloc(const struct cpumask *cpu_map)
1742 {
1743 	struct sched_domain_topology_level *tl;
1744 	int j;
1745 
1746 	for_each_sd_topology(tl) {
1747 		struct sd_data *sdd = &tl->data;
1748 
1749 		sdd->sd = alloc_percpu(struct sched_domain *);
1750 		if (!sdd->sd)
1751 			return -ENOMEM;
1752 
1753 		sdd->sds = alloc_percpu(struct sched_domain_shared *);
1754 		if (!sdd->sds)
1755 			return -ENOMEM;
1756 
1757 		sdd->sg = alloc_percpu(struct sched_group *);
1758 		if (!sdd->sg)
1759 			return -ENOMEM;
1760 
1761 		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
1762 		if (!sdd->sgc)
1763 			return -ENOMEM;
1764 
1765 		for_each_cpu(j, cpu_map) {
1766 			struct sched_domain *sd;
1767 			struct sched_domain_shared *sds;
1768 			struct sched_group *sg;
1769 			struct sched_group_capacity *sgc;
1770 
1771 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
1772 					GFP_KERNEL, cpu_to_node(j));
1773 			if (!sd)
1774 				return -ENOMEM;
1775 
1776 			*per_cpu_ptr(sdd->sd, j) = sd;
1777 
1778 			sds = kzalloc_node(sizeof(struct sched_domain_shared),
1779 					GFP_KERNEL, cpu_to_node(j));
1780 			if (!sds)
1781 				return -ENOMEM;
1782 
1783 			*per_cpu_ptr(sdd->sds, j) = sds;
1784 
1785 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
1786 					GFP_KERNEL, cpu_to_node(j));
1787 			if (!sg)
1788 				return -ENOMEM;
1789 
1790 			sg->next = sg;
1791 
1792 			*per_cpu_ptr(sdd->sg, j) = sg;
1793 
1794 			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
1795 					GFP_KERNEL, cpu_to_node(j));
1796 			if (!sgc)
1797 				return -ENOMEM;
1798 
1799 #ifdef CONFIG_SCHED_DEBUG
1800 			sgc->id = j;
1801 #endif
1802 
1803 			*per_cpu_ptr(sdd->sgc, j) = sgc;
1804 		}
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 static void __sdt_free(const struct cpumask *cpu_map)
1811 {
1812 	struct sched_domain_topology_level *tl;
1813 	int j;
1814 
1815 	for_each_sd_topology(tl) {
1816 		struct sd_data *sdd = &tl->data;
1817 
1818 		for_each_cpu(j, cpu_map) {
1819 			struct sched_domain *sd;
1820 
1821 			if (sdd->sd) {
1822 				sd = *per_cpu_ptr(sdd->sd, j);
1823 				if (sd && (sd->flags & SD_OVERLAP))
1824 					free_sched_groups(sd->groups, 0);
1825 				kfree(*per_cpu_ptr(sdd->sd, j));
1826 			}
1827 
1828 			if (sdd->sds)
1829 				kfree(*per_cpu_ptr(sdd->sds, j));
1830 			if (sdd->sg)
1831 				kfree(*per_cpu_ptr(sdd->sg, j));
1832 			if (sdd->sgc)
1833 				kfree(*per_cpu_ptr(sdd->sgc, j));
1834 		}
1835 		free_percpu(sdd->sd);
1836 		sdd->sd = NULL;
1837 		free_percpu(sdd->sds);
1838 		sdd->sds = NULL;
1839 		free_percpu(sdd->sg);
1840 		sdd->sg = NULL;
1841 		free_percpu(sdd->sgc);
1842 		sdd->sgc = NULL;
1843 	}
1844 }
1845 
1846 static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
1847 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
1848 		struct sched_domain *child, int dflags, int cpu)
1849 {
1850 	struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
1851 
1852 	if (child) {
1853 		sd->level = child->level + 1;
1854 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
1855 		child->parent = sd;
1856 
1857 		if (!cpumask_subset(sched_domain_span(child),
1858 				    sched_domain_span(sd))) {
1859 			pr_err("BUG: arch topology borken\n");
1860 #ifdef CONFIG_SCHED_DEBUG
1861 			pr_err("     the %s domain not a subset of the %s domain\n",
1862 					child->name, sd->name);
1863 #endif
1864 			/* Fixup, ensure @sd has at least @child CPUs. */
1865 			cpumask_or(sched_domain_span(sd),
1866 				   sched_domain_span(sd),
1867 				   sched_domain_span(child));
1868 		}
1869 
1870 	}
1871 	set_domain_attribute(sd, attr);
1872 
1873 	return sd;
1874 }
1875 
1876 /*
1877  * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
1878  * any two given CPUs at this (non-NUMA) topology level.
1879  */
1880 static bool topology_span_sane(struct sched_domain_topology_level *tl,
1881 			      const struct cpumask *cpu_map, int cpu)
1882 {
1883 	int i;
1884 
1885 	/* NUMA levels are allowed to overlap */
1886 	if (tl->flags & SDTL_OVERLAP)
1887 		return true;
1888 
1889 	/*
1890 	 * Non-NUMA levels cannot partially overlap - they must be either
1891 	 * completely equal or completely disjoint. Otherwise we can end up
1892 	 * breaking the sched_group lists - i.e. a later get_group() pass
1893 	 * breaks the linking done for an earlier span.
1894 	 */
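	/*
	 * Illustration (hypothetical masks): tl->mask(0) = {0,1} and
	 * tl->mask(1) = {1,2} are neither equal nor disjoint, so the check
	 * below rejects the topology.
	 */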
1895 	for_each_cpu(i, cpu_map) {
1896 		if (i == cpu)
1897 			continue;
1898 		/*
1899 		 * We should 'and' all those masks with 'cpu_map' to exactly
1900 		 * match the topology we're about to build, but that can only
1901 		 * remove CPUs, which only lessens our ability to detect
1902 		 * overlaps
1903 		 */
1904 		if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
1905 		    cpumask_intersects(tl->mask(cpu), tl->mask(i)))
1906 			return false;
1907 	}
1908 
1909 	return true;
1910 }
1911 
1912 /*
1913  * Find the sched_domain_topology_level where all CPU capacities are visible
1914  * for all CPUs.
1915  */
1916 static struct sched_domain_topology_level
1917 *asym_cpu_capacity_level(const struct cpumask *cpu_map)
1918 {
1919 	int i, j, asym_level = 0;
1920 	bool asym = false;
1921 	struct sched_domain_topology_level *tl, *asym_tl = NULL;
1922 	unsigned long cap;
1923 
1924 	/* Is there any asymmetry? */
1925 	cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
1926 
1927 	for_each_cpu(i, cpu_map) {
1928 		if (arch_scale_cpu_capacity(i) != cap) {
1929 			asym = true;
1930 			break;
1931 		}
1932 	}
1933 
1934 	if (!asym)
1935 		return NULL;
1936 
1937 	/*
1938 	 * Examine the topology from each CPU's point of view to detect the
1939 	 * lowest sched_domain_topology_level where a highest-capacity CPU is
1940 	 * visible to everyone.
1941 	 */
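	/*
	 * Illustration (hypothetical 4 little + 4 big system): at the MC
	 * level a little CPU only sees its own cluster, so the first level
	 * at which it sees a higher-capacity CPU is the one spanning both
	 * clusters; that level is returned and later tagged with
	 * SD_ASYM_CPUCAPACITY by build_sched_domains().
	 */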
1942 	for_each_cpu(i, cpu_map) {
1943 		unsigned long max_capacity = arch_scale_cpu_capacity(i);
1944 		int tl_id = 0;
1945 
1946 		for_each_sd_topology(tl) {
1947 			if (tl_id < asym_level)
1948 				goto next_level;
1949 
1950 			for_each_cpu_and(j, tl->mask(i), cpu_map) {
1951 				unsigned long capacity;
1952 
1953 				capacity = arch_scale_cpu_capacity(j);
1954 
1955 				if (capacity <= max_capacity)
1956 					continue;
1957 
1958 				max_capacity = capacity;
1959 				asym_level = tl_id;
1960 				asym_tl = tl;
1961 			}
1962 next_level:
1963 			tl_id++;
1964 		}
1965 	}
1966 
1967 	return asym_tl;
1968 }
1969 
1970 
1971 /*
1972  * Build sched domains for a given set of CPUs and attach the sched domains
1973  * to the individual CPUs.
1974  */
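/*
 * Rough outline of the steps below: allocate the per-level and root-domain
 * state, build a sched_domain hierarchy for each CPU from the topology
 * levels, link the sched_groups, initialise group capacities, then attach
 * each CPU's lowest domain to the root domain.
 */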
1975 static int
1976 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
1977 {
1978 	enum s_alloc alloc_state = sa_none;
1979 	struct sched_domain *sd;
1980 	struct s_data d;
1981 	int i, ret = -ENOMEM;
1982 	struct sched_domain_topology_level *tl_asym;
1983 	bool has_asym = false;
1984 
1985 	if (WARN_ON(cpumask_empty(cpu_map)))
1986 		goto error;
1987 
1988 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
1989 	if (alloc_state != sa_rootdomain)
1990 		goto error;
1991 
1992 	tl_asym = asym_cpu_capacity_level(cpu_map);
1993 
1994 	/* Set up domains for CPUs specified by the cpu_map: */
1995 	for_each_cpu(i, cpu_map) {
1996 		struct sched_domain_topology_level *tl;
1997 
1998 		sd = NULL;
1999 		for_each_sd_topology(tl) {
2000 			int dflags = 0;
2001 
2002 			if (tl == tl_asym) {
2003 				dflags |= SD_ASYM_CPUCAPACITY;
2004 				has_asym = true;
2005 			}
2006 
2007 			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
2008 				goto error;
2009 
2010 			sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
2011 
2012 			if (tl == sched_domain_topology)
2013 				*per_cpu_ptr(d.sd, i) = sd;
2014 			if (tl->flags & SDTL_OVERLAP)
2015 				sd->flags |= SD_OVERLAP;
2016 			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
2017 				break;
2018 		}
2019 	}
2020 
2021 	/* Build the groups for the domains */
2022 	for_each_cpu(i, cpu_map) {
2023 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2024 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
2025 			if (sd->flags & SD_OVERLAP) {
2026 				if (build_overlap_sched_groups(sd, i))
2027 					goto error;
2028 			} else {
2029 				if (build_sched_groups(sd, i))
2030 					goto error;
2031 			}
2032 		}
2033 	}
2034 
2035 	/* Calculate CPU capacity for physical packages and nodes */
2036 	for (i = nr_cpumask_bits-1; i >= 0; i--) {
2037 		if (!cpumask_test_cpu(i, cpu_map))
2038 			continue;
2039 
2040 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
2041 			claim_allocations(i, sd);
2042 			init_sched_groups_capacity(i, sd);
2043 		}
2044 	}
2045 
2046 	/* Attach the domains */
2047 	rcu_read_lock();
2048 	for_each_cpu(i, cpu_map) {
2049 		sd = *per_cpu_ptr(d.sd, i);
2050 		cpu_attach_domain(sd, d.rd, i);
2051 	}
2052 	rcu_read_unlock();
2053 
2054 	if (has_asym)
2055 		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
2056 
2057 	ret = 0;
2058 error:
2059 	__free_domain_allocs(&d, alloc_state, cpu_map);
2060 
2061 	return ret;
2062 }
2063 
2064 /* Current sched domains: */
2065 static cpumask_var_t			*doms_cur;
2066 
2067 /* Number of sched domains in 'doms_cur': */
2068 static int				ndoms_cur;
2069 
2070 /* Attributes of custom domains in 'doms_cur' */
2071 static struct sched_domain_attr		*dattr_cur;
2072 
2073 /*
2074  * Special case: If a kmalloc() of a doms_cur partition (array of
2075  * cpumask) fails, then fall back to a single sched domain,
2076  * as determined by the single cpumask fallback_doms.
2077  */
2078 static cpumask_var_t			fallback_doms;
2079 
2080 /*
2081  * arch_update_cpu_topology lets virtualized architectures update the
2082  * CPU core maps. It is supposed to return 1 if the topology changed
2083  * or 0 if it stayed the same.
2084  */
2085 int __weak arch_update_cpu_topology(void)
2086 {
2087 	return 0;
2088 }
2089 
2090 cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
2091 {
2092 	int i;
2093 	cpumask_var_t *doms;
2094 
2095 	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
2096 	if (!doms)
2097 		return NULL;
2098 	for (i = 0; i < ndoms; i++) {
2099 		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
2100 			free_sched_domains(doms, i);
2101 			return NULL;
2102 		}
2103 	}
2104 	return doms;
2105 }
2106 
2107 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
2108 {
2109 	unsigned int i;
2110 	for (i = 0; i < ndoms; i++)
2111 		free_cpumask_var(doms[i]);
2112 	kfree(doms);
2113 }
2114 
2115 /*
2116  * Set up scheduler domains and groups.  For now this just excludes isolated
2117  * CPUs, but could be used to exclude other special cases in the future.
2118  */
2119 int sched_init_domains(const struct cpumask *cpu_map)
2120 {
2121 	int err;
2122 
2123 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
2124 	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
2125 	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
2126 
2127 	arch_update_cpu_topology();
2128 	ndoms_cur = 1;
2129 	doms_cur = alloc_sched_domains(ndoms_cur);
2130 	if (!doms_cur)
2131 		doms_cur = &fallback_doms;
2132 	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
2133 	err = build_sched_domains(doms_cur[0], NULL);
2134 	register_sched_domain_sysctl();
2135 
2136 	return err;
2137 }
2138 
2139 /*
2140  * Detach sched domains from a group of CPUs specified in cpu_map
2141  * These CPUs will now be attached to the NULL domain
2142  */
2143 static void detach_destroy_domains(const struct cpumask *cpu_map)
2144 {
2145 	unsigned int cpu = cpumask_any(cpu_map);
2146 	int i;
2147 
2148 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
2149 		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
2150 
2151 	rcu_read_lock();
2152 	for_each_cpu(i, cpu_map)
2153 		cpu_attach_domain(NULL, &def_root_domain, i);
2154 	rcu_read_unlock();
2155 }
2156 
2157 /* handle null as "default" */
2158 static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
2159 			struct sched_domain_attr *new, int idx_new)
2160 {
2161 	struct sched_domain_attr tmp;
2162 
2163 	/* Fast path: */
2164 	if (!new && !cur)
2165 		return 1;
2166 
2167 	tmp = SD_ATTR_INIT;
2168 
2169 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
2170 			new ? (new + idx_new) : &tmp,
2171 			sizeof(struct sched_domain_attr));
2172 }
2173 
2174 /*
2175  * Partition sched domains as specified by the 'ndoms_new'
2176  * cpumasks in the array doms_new[] of cpumasks. This compares
2177  * doms_new[] to the current sched domain partitioning, doms_cur[].
2178  * It destroys each deleted domain and builds each new domain.
2179  *
2180  * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
2181  * The masks don't intersect (don't overlap). We set up one
2182  * sched domain for each mask. CPUs not in any of the cpumasks will
2183  * not be load balanced. If the same cpumask appears both in the
2184  * current 'doms_cur' domains and in the new 'doms_new', we can leave
2185  * it as it is.
2186  *
2187  * The passed-in 'doms_new' should be allocated using
2188  * alloc_sched_domains.  This routine takes ownership of it and will
2189  * free_sched_domains it when done with it.  If the caller failed the
2190  * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
2191  * and partition_sched_domains() will fall back to the single partition
2192  * 'fallback_doms'; this also forces the domains to be rebuilt.
2193  *
2194  * If doms_new == NULL it will be replaced with cpu_online_mask.
2195  * ndoms_new == 0 is a special case for destroying existing domains,
2196  * and it will not create the default domain.
2197  *
2198  * Call with hotplug lock and sched_domains_mutex held
2199  */
2200 void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
2201 				    struct sched_domain_attr *dattr_new)
2202 {
2203 	bool __maybe_unused has_eas = false;
2204 	int i, j, n;
2205 	int new_topology;
2206 
2207 	lockdep_assert_held(&sched_domains_mutex);
2208 
2209 	/* Always unregister in case we don't destroy any domains: */
2210 	unregister_sched_domain_sysctl();
2211 
2212 	/* Let the architecture update CPU core mappings: */
2213 	new_topology = arch_update_cpu_topology();
2214 
2215 	if (!doms_new) {
2216 		WARN_ON_ONCE(dattr_new);
2217 		n = 0;
2218 		doms_new = alloc_sched_domains(1);
2219 		if (doms_new) {
2220 			n = 1;
2221 			cpumask_and(doms_new[0], cpu_active_mask,
2222 				    housekeeping_cpumask(HK_FLAG_DOMAIN));
2223 		}
2224 	} else {
2225 		n = ndoms_new;
2226 	}
2227 
2228 	/* Destroy deleted domains: */
2229 	for (i = 0; i < ndoms_cur; i++) {
2230 		for (j = 0; j < n && !new_topology; j++) {
2231 			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
2232 			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
2233 				struct root_domain *rd;
2234 
2235 				/*
2236 				 * This domain won't be destroyed and as such
2237 				 * its dl_bw->total_bw needs to be cleared.  It
2238 				 * will be recomputed in function
2239 				 * update_tasks_root_domain().
2240 				 */
2241 				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
2242 				dl_clear_root_domain(rd);
2243 				goto match1;
2244 			}
2245 		}
2246 		/* No match - a current sched domain not in new doms_new[] */
2247 		detach_destroy_domains(doms_cur[i]);
2248 match1:
2249 		;
2250 	}
2251 
2252 	n = ndoms_cur;
2253 	if (!doms_new) {
2254 		n = 0;
2255 		doms_new = &fallback_doms;
2256 		cpumask_and(doms_new[0], cpu_active_mask,
2257 			    housekeeping_cpumask(HK_FLAG_DOMAIN));
2258 	}
2259 
2260 	/* Build new domains: */
2261 	for (i = 0; i < ndoms_new; i++) {
2262 		for (j = 0; j < n && !new_topology; j++) {
2263 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2264 			    dattrs_equal(dattr_new, i, dattr_cur, j))
2265 				goto match2;
2266 		}
2267 		/* No match - add a new doms_new */
2268 		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
2269 match2:
2270 		;
2271 	}
2272 
2273 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2274 	/* Build perf. domains: */
2275 	for (i = 0; i < ndoms_new; i++) {
2276 		for (j = 0; j < n && !sched_energy_update; j++) {
2277 			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
2278 			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
2279 				has_eas = true;
2280 				goto match3;
2281 			}
2282 		}
2283 		/* No match - add perf. domains for a new rd */
2284 		has_eas |= build_perf_domains(doms_new[i]);
2285 match3:
2286 		;
2287 	}
2288 	sched_energy_set(has_eas);
2289 #endif
2290 
2291 	/* Remember the new sched domains: */
2292 	if (doms_cur != &fallback_doms)
2293 		free_sched_domains(doms_cur, ndoms_cur);
2294 
2295 	kfree(dattr_cur);
2296 	doms_cur = doms_new;
2297 	dattr_cur = dattr_new;
2298 	ndoms_cur = ndoms_new;
2299 
2300 	register_sched_domain_sysctl();
2301 }
2302 
2303 /*
2304  * Call with hotplug lock held
2305  */
2306 void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
2307 			     struct sched_domain_attr *dattr_new)
2308 {
2309 	mutex_lock(&sched_domains_mutex);
2310 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
2311 	mutex_unlock(&sched_domains_mutex);
2312 }
2313
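/*
 * Usage sketch (hypothetical caller, e.g. a cpuset-style controller): split
 * the machine into two non-overlapping load-balancing partitions.  Assumes
 * the caller holds the hotplug lock, as required above, and that @set_a and
 * @set_b are disjoint.  Names below are illustrative only.
 */
#if 0
static void example_two_partitions(const struct cpumask *set_a,
				   const struct cpumask *set_b)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms) {
		/* Allocation failed: fall back to one partition (see above). */
		partition_sched_domains(1, NULL, NULL);
		return;
	}

	cpumask_copy(doms[0], set_a);
	cpumask_copy(doms[1], set_b);

	/* partition_sched_domains() takes ownership of @doms. */
	partition_sched_domains(2, doms, NULL);
}
#endif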