// SPDX-License-Identifier: GPL-2.0
/*
 * Scheduler topology setup/handling methods
 */
#include "sched.h"

#include <trace/hooks/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
#ifdef CONFIG_LOCKDEP
EXPORT_SYMBOL_GPL(sched_domains_mutex);
#endif

/* Protected by sched_domains_mutex: */
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = true;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

#define SD_FLAG(_name, mflags) [__##_name] = { .meta_flags = mflags, .name = #_name },
const struct sd_flag_debug sd_flag_debug[] = {
#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
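
/*
 * Illustrative expansion of the table above (not part of the build):
 * with sd_flags.h declaring, e.g., SD_FLAG(SD_BALANCE_NEWIDLE,
 * SDF_SHARED_CHILD), the include expands to an entry like:
 *
 *	[__SD_BALANCE_NEWIDLE] = {
 *		.meta_flags = SDF_SHARED_CHILD,
 *		.name       = "SD_BALANCE_NEWIDLE",
 *	},
 *
 * i.e. a name/metadata table indexed by flag bit number, which the
 * debug code below uses to print flag names and check meta flags.
 */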

static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;
	unsigned long flags = sd->flags;
	unsigned int idx;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		unsigned int flag = BIT(idx);
		unsigned int meta_flags = sd_flag_debug[idx].meta_flags;

		if ((meta_flags & SDF_SHARED_CHILD) && sd->child &&
		    !(sd->child->flags & flag))
			printk(KERN_ERR "ERROR: flag %s set here but not in child\n",
			       sd_flag_debug[idx].name);

		if ((meta_flags & SDF_SHARED_PARENT) && sd->parent &&
		    !(sd->parent->flags & flag))
			printk(KERN_ERR "ERROR: flag %s set here but not in parent\n",
			       sd_flag_debug[idx].name);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
		       group->sgc->id,
		       cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
			       cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

/* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
#define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_NEEDS_GROUPS)) |
static const unsigned int SD_DEGENERATE_GROUPS_MASK =
#include <linux/sched/sd_flags.h>
0;
#undef SD_FLAG
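
/*
 * Illustrative expansion of the mask above (assumed example entries):
 * each SD_FLAG(name, mflags) becomes "(name * !!(mflags &
 * SDF_NEEDS_GROUPS)) |", so flags carrying SDF_NEEDS_GROUPS contribute
 * their bit and the rest contribute 0, e.g.:
 *
 *	SD_DEGENERATE_GROUPS_MASK =
 *		(SD_BALANCE_NEWIDLE * 1) |	// has SDF_NEEDS_GROUPS
 *		(SD_WAKE_AFFINE     * 0) |	// does not
 *		0;
 */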

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups */
	if ((sd->flags & SD_DEGENERATE_GROUPS_MASK) &&
	    (sd->groups != sd->groups->next))
		return 0;

	/* Following flags don't use groups */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent */
	if (parent->groups == parent->groups->next)
		pflags &= ~SD_DEGENERATE_GROUPS_MASK;

	if (~cflags & pflags)
		return 0;

	return 1;
}
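
/*
 * Worked example for the "~cflags & pflags" test above (illustrative
 * bit values, not real flag layouts): with cflags = 0b0110 and
 * pflags = 0b0100, ~cflags & pflags == 0 -- the parent sets nothing
 * the child does not already set, so it adds no behaviour and can be
 * collapsed. With pflags = 0b1100 instead, bit 3 survives the test,
 * so the parent still provides behaviour and must be kept.
 */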

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
DEFINE_STATIC_KEY_FALSE(sched_energy_present);
unsigned int sysctl_sched_energy_aware = 1;
DEFINE_MUTEX(sched_energy_mutex);
bool sched_energy_update;

#ifdef CONFIG_PROC_SYSCTL
int sched_energy_aware_handler(struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret, state;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write) {
		state = static_branch_unlikely(&sched_energy_present);
		if (state != sysctl_sched_energy_aware) {
			mutex_lock(&sched_energy_mutex);
			sched_energy_update = 1;
			rebuild_sched_domains();
			sched_energy_update = 0;
			mutex_unlock(&sched_energy_mutex);
		}
	}

	return ret;
}
#endif

static void free_pd(struct perf_domain *pd)
{
	struct perf_domain *tmp;

	while (pd) {
		tmp = pd->next;
		kfree(pd);
		pd = tmp;
	}
}

static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
{
	while (pd) {
		if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
			return pd;
		pd = pd->next;
	}

	return NULL;
}

static struct perf_domain *pd_init(int cpu)
{
	struct em_perf_domain *obj = em_cpu_get(cpu);
	struct perf_domain *pd;

	if (!obj) {
		if (sched_debug())
			pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
		return NULL;
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return NULL;
	pd->em_pd = obj;

	return pd;
}

static void perf_domain_debug(const struct cpumask *cpu_map,
			      struct perf_domain *pd)
{
	if (!sched_debug() || !pd)
		return;

	printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));

	while (pd) {
		printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_pstate=%d }",
		       cpumask_first(perf_domain_span(pd)),
		       cpumask_pr_args(perf_domain_span(pd)),
		       em_pd_nr_perf_states(pd->em_pd));
		pd = pd->next;
	}

	printk(KERN_CONT "\n");
}

static void destroy_perf_domain_rcu(struct rcu_head *rp)
{
	struct perf_domain *pd;

	pd = container_of(rp, struct perf_domain, rcu);
	free_pd(pd);
}

static void sched_energy_set(bool has_eas)
{
	if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: stopping EAS\n", __func__);
		static_branch_disable_cpuslocked(&sched_energy_present);
	} else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
		if (sched_debug())
			pr_info("%s: starting EAS\n", __func__);
		static_branch_enable_cpuslocked(&sched_energy_present);
	}
}

/*
 * EAS can be used on a root domain if it meets all the following conditions:
 *    1. an Energy Model (EM) is available;
 *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
 *    3. no SMT is detected;
 *    4. the EM complexity is low enough to keep scheduling overheads low.
 *
 * The complexity of the Energy Model is defined as:
 *
 *              C = nr_pd * (nr_cpus + nr_ps)
 *
 * with parameters defined as:
 * - nr_pd:    the number of performance domains
 * - nr_cpus:  the number of CPUs
 * - nr_ps:    the sum of the number of performance states of all performance
 *             domains (for example, on a system with 2 performance domains,
 *             with 10 performance states each, nr_ps = 2 * 10 = 20).
 *
 * It is generally not a good idea to use such a model in the wake-up path on
 * very complex platforms because of the associated scheduling overheads. The
 * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
 * with per-CPU DVFS and less than 8 performance states each, for example.
 */
#define EM_MAX_COMPLEXITY 2048
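
/*
 * Worked example for the bound above, on a hypothetical 16-CPU system
 * with per-CPU DVFS (nr_pd = 16, nr_cpus = 16) and 7 performance
 * states per domain (nr_ps = 16 * 7 = 112):
 *
 *	C = 16 * (16 + 112) = 2048
 *
 * which is exactly EM_MAX_COMPLEXITY, so EAS is still allowed. An 8th
 * performance state per domain would push C to 16 * (16 + 128) = 2304
 * and trip the complexity check in build_perf_domains() below.
 */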

static bool build_perf_domains(const struct cpumask *cpu_map)
{
	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
	struct perf_domain *pd = NULL, *tmp;
	int cpu = cpumask_first(cpu_map);
	struct root_domain *rd = cpu_rq(cpu)->rd;
	bool eas_check = false;

	if (!sysctl_sched_energy_aware)
		goto free;

	/*
	 * EAS is enabled for asymmetric CPU capacity topologies.
	 * Allow the vendor to override this if desired.
	 */
	trace_android_rvh_build_perf_domains(&eas_check);
	if (!per_cpu(sd_asym_cpucapacity, cpu) && !eas_check) {
		if (sched_debug()) {
			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
				cpumask_pr_args(cpu_map));
		}
		goto free;
	}

	/* EAS definitely does *not* handle SMT */
	if (sched_smt_active()) {
		pr_warn("rd %*pbl: Disabling EAS, SMT is not supported\n",
			cpumask_pr_args(cpu_map));
		goto free;
	}

	for_each_cpu(i, cpu_map) {
		/* Skip already covered CPUs. */
		if (find_pd(pd, i))
			continue;

		/* Create the new pd and add it to the local list. */
		tmp = pd_init(i);
		if (!tmp)
			goto free;
		tmp->next = pd;
		pd = tmp;

		/*
		 * Count performance domains and performance states for the
		 * complexity check.
		 */
		nr_pd++;
		nr_ps += em_pd_nr_perf_states(pd->em_pd);
	}

	/* Bail out if the Energy Model complexity is too high. */
	if (nr_pd * (nr_ps + nr_cpus) > EM_MAX_COMPLEXITY) {
		WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
		     cpumask_pr_args(cpu_map));
		goto free;
	}

	perf_domain_debug(cpu_map, pd);

	/* Attach the new list of performance domains to the root domain. */
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, pd);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return !!pd;

free:
	free_pd(pd);
	tmp = rd->pd;
	rcu_assign_pointer(rd->pd, NULL);
	if (tmp)
		call_rcu(&tmp->rcu, destroy_perf_domain_rcu);

	return false;
}
#else
static void free_pd(struct perf_domain *pd) { }
#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */

static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	free_pd(rd->pd);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu(&old_rd->rcu, free_rootdomain);
}

void sched_get_rd(struct root_domain *rd)
{
	atomic_inc(&rd->refcount);
}

void sched_put_rd(struct root_domain *rd)
{
	if (!atomic_dec_and_test(&rd->refcount))
		return;

	call_rcu(&rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}

static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references; an
	 * overlapping domain, having private groups, has only one. Iterate,
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;

		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this allows
 * us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain); this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
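
/*
 * For illustration (a sketch, not the authoritative implementation):
 * with the sd_llc_id scheme above, a cache-sharing test reduces to a
 * single integer compare, roughly:
 *
 *	bool cpus_share_cache(int this_cpu, int that_cpu)
 *	{
 *		return per_cpu(sd_llc_id, this_cpu) ==
 *		       per_cpu(sd_llc_id, that_cpu);
 *	}
 */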
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);

	sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
	rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;

		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

struct s_data {
	struct sched_domain * __percpu *sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group; this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask contains all those CPUs that could actually end up at
 * this group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}


/*
 * NUMA topology (first read the regular topology blurb below)
 *
 * Given a node-distance table, for example:
 *
 *   node   0   1   2   3
 *     0:  10  20  30  20
 *     1:  20  10  20  30
 *     2:  30  20  10  20
 *     3:  20  30  20  10
 *
 * which represents a 4 node ring topology like:
 *
 *   0 ----- 1
 *   |       |
 *   |       |
 *   |       |
 *   3 ----- 2
 *
 * We want to construct domains and groups to represent this. The way we go
 * about doing this is to build the domains on 'hops'. For each NUMA level we
 * construct the mask of all nodes reachable in @level hops.
 *
 * For the above NUMA topology that gives 3 levels:
 *
 * NUMA-2	0-3		0-3		0-3		0-3
 *  groups:	{0-1,3},{1-3}	{0-2},{0,2-3}	{1-3},{0-1,3}	{0,2-3},{0-2}
 *
 * NUMA-1	0-1,3		0-2		1-3		0,2-3
 *  groups:	{0},{1},{3}	{0},{1},{2}	{1},{2},{3}	{0},{2},{3}
 *
 * NUMA-0	0		1		2		3
 *
 *
 * As can be seen, things don't nicely line up as with the regular topology.
 * When we iterate a domain in child domain chunks some nodes can be
 * represented multiple times -- hence the "overlap" naming for this part of
 * the topology.
 *
 * In order to minimize this overlap, we only build enough groups to cover the
 * domain. For instance Node-0 NUMA-2 would only get groups: 0-1,3 and 1-3.
 *
 * Because:
 *
 *  - the first group of each domain is its child domain; this
 *    gets us the first 0-1,3
 *  - the only uncovered node is 2, whose child domain is 1-3.
 *
 * However, because of the overlap, computing a unique CPU for each group is
 * more complicated. Consider for instance the groups of NODE-1 NUMA-2: both
 * groups include the CPUs of Node-0, while those CPUs would not in fact ever
 * end up at those groups (they would end up in group: 0-1,3).
 *
 * To correct this we have to introduce the group balance mask. This mask
 * will contain those CPUs in the group that can reach this group given the
 * (child) domain tree.
 *
 * With this we can once again compute balance_cpu and sched_group_capacity
 * relations.
 *
 * XXX include words on how balance_cpu is unique and therefore can be
 * used for sched_group_capacity links.
 *
 *
 * Another 'interesting' topology is:
 *
 *   node   0   1   2   3
 *     0:  10  20  20  30
 *     1:  20  10  20  20
 *     2:  20  20  10  20
 *     3:  30  20  20  10
 *
 * Which looks a little like:
 *
 *   0 ----- 1
 *   |     / |
 *   |   /   |
 *   | /     |
 *   2 ----- 3
 *
 * This topology is asymmetric: nodes 1,2 are fully connected, but nodes 0,3
 * are not.
 *
 * This leads to a few particularly weird cases where the number of
 * sched_domains is not the same for each CPU. Consider:
 *
 * NUMA-2	0-3						0-3
 *  groups:	{0-2},{1-3}					{1-3},{0-2}
 *
 * NUMA-1	0-2		0-3		0-3		1-3
 *
 * NUMA-0	0		1		2		3
 *
 */


/*
 * Build the balance mask; it contains only those CPUs that can arrive at this
 * group and should be considered to continue balancing.
 *
 * We do this during the group creation pass, therefore the group information
 * isn't complete yet, however since each group represents a (child) domain we
 * can fully construct this using the sched_domain bits (which are already
 * complete).
 */
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}

/*
 * XXX: This creates per-node group entries; since the load-balancer will
 * immediately access remote memory to construct this group's load-balance
 * statistics, having the groups node-local is of dubious benefit.
 */
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			  GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child)
		cpumask_copy(sg_span, sched_domain_span(sd->child));
	else
		cpumask_copy(sg_span, sched_domain_span(sd));

	atomic_inc(&sg->ref);
	return sg;
}

static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first_and(sched_group_span(sg), mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth; make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sd, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}


/*
 * Package topology (also see the load-balance blurb in fair.c)
 *
 * The scheduler builds a tree structure to represent a number of important
 * topology features. By default (default_topology[]) these include:
 *
 *  - Simultaneous multithreading (SMT)
 *  - Multi-Core Cache (MC)
 *  - Package (DIE)
 *
 * Where the last one more or less denotes everything up to a NUMA node.
 *
 * The tree consists of 3 primary data structures:
 *
 *	sched_domain -> sched_group -> sched_group_capacity
 *	    ^ ^             ^ ^
 *          `-'             `-'
 *
 * The sched_domains are per-CPU and have a two way link (parent & child) and
 * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's,
 * each denoting the domains of the level below (or individual CPUs in case
 * of the first domain level). The sched_group linked by a sched_domain
 * includes the CPU of that sched_domain [*].
 *
 * Take for instance a 2 threaded, 2 core, 2 cache cluster part:
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * DIE  [                             ]
 * MC   [             ] [             ]
 * SMT  [     ] [     ] [     ] [     ]
 *
 *  - or -
 *
 * DIE  0-7 0-7 0-7 0-7 0-7 0-7 0-7 0-7
 * MC   0-3 0-3 0-3 0-3 4-7 4-7 4-7 4-7
 * SMT  0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7
 *
 * CPU   0   1   2   3   4   5   6   7
 *
 * One way to think about it is: sched_domain moves you up and down among
 * these topology levels, while sched_group moves you sideways through it,
 * at child domain granularity.
 *
 * sched_group_capacity ensures each unique sched_group has shared storage.
 *
 * There are two related construction problems, both of which require a CPU
 * that uniquely identifies each group (for a given domain):
 *
 *  - The first is the balance_cpu (see should_we_balance() and the
 *    load-balance blurb in fair.c); for each group we only want 1 CPU to
 *    continue balancing at a higher domain.
 *
 *  - The second is the sched_group_capacity; we want all identical groups
 *    to share a single sched_group_capacity.
 *
 * These topologies are exclusive by construction: it is impossible for an
 * SMT thread to belong to multiple cores, and for cores to be part of
 * multiple caches. There is a very clear and unique location for each CPU
 * in the hierarchy.
 *
 * Therefore computing a unique CPU for each group is trivial (the iteration
 * mask is redundant and set to all 1s; all CPUs in a group will end up at
 * _that_ group), and we can simply pick the first CPU in each group.
 *
 *
 * [*] in other words, the first group of each domain is its child domain.
 */

static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;
	struct sched_group *sg;
	bool already_visited;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	sg = *per_cpu_ptr(sdd->sg, cpu);
	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

	/* Increase refcounts for claim_allocations: */
	already_visited = atomic_inc_return(&sg->ref) > 1;
	/* sgc visits should follow a similar trend as sg */
	WARN_ON(already_visited != (atomic_inc_return(&sg->sgc->ref) > 1));

	/* If we have already visited that group, it's already initialized. */
	if (already_visited)
		return sg;

	if (child) {
		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
	} else {
		cpumask_set_cpu(cpu, sched_group_span(sg));
		cpumask_set_cpu(cpu, group_balance_mask(sg));
	}

	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
	sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;

	return sg;
}

/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, will set each group's ->cpumask correctly,
 * and will initialize their ->sgc.
 *
 * Assumes the sched_domain tree is fully constructed.
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct sched_group *sg;

		if (cpumask_test_cpu(i, covered))
			continue;

		sg = get_group(i, sdd);

		cpumask_or(covered, covered, sched_group_span(sg));

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;
	sd->groups = first;

	return 0;
}

/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of a sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_capacity for all the groups in a sched domain will be the
 * same unless there are asymmetries in the topology. If there are
 * asymmetries, the group having more cpu_capacity will pick up more load
 * compared to the group having less cpu_capacity.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		int cpu, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_span(sg));

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		for_each_cpu(cpu, sched_group_span(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}

/*
 * Initializers for sched domains.
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;

	if (sd->level > request) {
		/* Turn off idle balance on this domain: */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		fallthrough;
	case sa_sd:
		free_percpu(d->sd);
		fallthrough;
	case sa_sd_storage:
		__sdt_free(cpu_map);
		fallthrough;
	case sa_none:
		break;
	}
}

static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;

	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
		*per_cpu_ptr(sdd->sds, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}

#ifdef CONFIG_NUMA
enum numa_topology_type sched_numa_topology_type;

static int sched_domains_numa_levels;
static int sched_domains_curr_level;

int sched_max_numa_distance;
static int *sched_domains_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
#endif

/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is artificial and mapped in the below sd_init()
 * function:
 *
 *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
 *   SD_SHARE_PKG_RESOURCES - describes shared caches
 *   SD_NUMA                - describes NUMA topologies
 *
 * Odd one out, which besides describing the topology also prescribes the
 * desired behaviour that goes along with it:
 *
 *   SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY	|	\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA		|	\
	 SD_ASYM_PACKING)

static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
	const struct cpumask *cpu_map,
	struct sched_domain *child, int dflags, int cpu)
{
	struct sd_data *sdd = &tl->data;
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	int sd_id, sd_weight, sd_flags = 0;

#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;
#endif

	sd_weight = cpumask_weight(tl->mask(cpu));

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= TOPOLOGY_SD_FLAGS;

	/* Apply detected topology flags */
	sd_flags |= dflags;

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 16,
		.imbalance_pct		= 117,

		.cache_nice_tries	= 0,

		.flags			= 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_PKG_RESOURCES
					| 0*SD_SERIALIZE
					| 1*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
		.child			= child,
#ifdef CONFIG_SCHED_DEBUG
		.name			= tl->name,
#endif
	};

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	sd_id = cpumask_first(sched_domain_span(sd));

	/*
	 * Convert topological properties into behaviour.
	 */

	/* Don't attempt to spread across CPUs of different capacities. */
	if ((sd->flags & SD_ASYM_CPUCAPACITY) && sd->child)
		sd->child->flags &= ~SD_PREFER_SIBLING;

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->imbalance_pct = 110;

	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;

		sd->flags &= ~SD_PREFER_SIBLING;
		sd->flags |= SD_SERIALIZE;
		if (sched_domains_numa_distance[tl->numa_level] > node_reclaim_distance) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif
	} else {
		sd->cache_nice_tries = 1;
	}

	/*
	 * For all levels sharing cache; connect a sched_domain_shared
	 * instance.
	 */
	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
		atomic_inc(&sd->shared->ref);
		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
	}

	sd->private = sdd;

	return sd;
}

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology =
	default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	if (WARN_ON_ONCE(sched_smp_initialized))
		return;

	sched_domain_topology = tl;
}

#ifdef CONFIG_NUMA

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static bool done = false;
	int i, j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING "  ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i, j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
static void init_numa_topology_type(void)
{
	int a, b, c, n;

	n = sched_max_numa_distance;

	if (sched_domains_numa_levels <= 2) {
		sched_numa_topology_type = NUMA_DIRECT;
		return;
	}

	for_each_online_node(a) {
		for_each_online_node(b) {
			/* Find two nodes furthest removed from each other. */
			if (node_distance(a, b) < n)
				continue;

			/* Is there an intermediary node between a and b? */
			for_each_online_node(c) {
				if (node_distance(a, c) < n &&
				    node_distance(b, c) < n) {
					sched_numa_topology_type =
							NUMA_GLUELESS_MESH;
					return;
				}
			}

			sched_numa_topology_type = NUMA_BACKPLANE;
			return;
		}
	}
}


#define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)

void sched_init_numa(void)
{
	struct sched_domain_topology_level *tl;
	unsigned long *distance_map;
	int nr_levels = 0;
	int i, j;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 */
	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
	if (!distance_map)
		return;

	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			int distance = node_distance(i, j);

			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
				sched_numa_warn("Invalid distance value range");
				/* Don't leak the bitmap on the error path. */
				bitmap_free(distance_map);
				return;
			}

			bitmap_set(distance_map, distance, 1);
		}
	}
	/*
	 * We can now figure out how many unique distance values there are and
	 * allocate memory accordingly.
	 */
	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);

	sched_domains_numa_distance = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
	if (!sched_domains_numa_distance) {
		bitmap_free(distance_map);
		return;
	}

	for (i = 0, j = 0; i < nr_levels; i++, j++) {
		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
		sched_domains_numa_distance[i] = j;
	}

	bitmap_free(distance_map);

	/*
	 * 'nr_levels' contains the number of unique distances
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain fewer than 'nr_levels' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * in other functions.
	 *
	 * We reset it to 'nr_levels' at the end of this function.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are that many hops away from us.
	 */
	for (i = 0; i < nr_levels; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			int k;

			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for_each_node(k) {
				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
					sched_numa_warn("Node-distance not symmetric");

				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	/* Compute default topology size */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + nr_levels + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * Add the NUMA identity distance, aka single NODE.
	 */
	tl[i++] = (struct sched_domain_topology_level){
		.mask = sd_numa_mask,
		.numa_level = 0,
		SD_INIT_NAME(NODE)
	};

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 1; j < nr_levels; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.mask = sd_numa_mask,
			.sd_flags = cpu_numa_flags,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
			SD_INIT_NAME(NUMA)
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = nr_levels;
	sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];

	init_numa_topology_type();
}

void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

/*
 * sched_numa_find_closest() - given the NUMA topology, find the CPU in
 *                             @cpus closest to @cpu.
 * @cpus: cpumask to find a CPU from
 * @cpu:  CPU to be close to
 *
 * Returns: the CPU found, or nr_cpu_ids when nothing is found.
 */
int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
{
	int i, j = cpu_to_node(cpu);

	for (i = 0; i < sched_domains_numa_levels; i++) {
		cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
		if (cpu < nr_cpu_ids)
			return cpu;
	}
	return nr_cpu_ids;
}

#endif /* CONFIG_NUMA */

static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sds = alloc_percpu(struct sched_domain_shared *);
		if (!sdd->sds)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_domain_shared *sds;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sds = kzalloc_node(sizeof(struct sched_domain_shared),
					GFP_KERNEL, cpu_to_node(j));
			if (!sds)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sds, j) = sds;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

#ifdef CONFIG_SCHED_DEBUG
			sgc->id = j;
#endif

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}

static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sds)
				kfree(*per_cpu_ptr(sdd->sds, j));
			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sds);
		sdd->sds = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}

static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int dflags, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);

	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology borken\n");
#ifdef CONFIG_SCHED_DEBUG
			pr_err("     the %s domain not a subset of the %s domain\n",
					child->name, sd->name);
#endif
			/* Fixup, ensure @sd has at least @child CPUs. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}

	}
	set_domain_attribute(sd, attr);

	return sd;
}

/*
 * Ensure topology masks are sane, i.e. there are no conflicts (overlaps) for
 * any two given CPUs at this (non-NUMA) topology level.
 */
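/*
 * Example of the failure mode being guarded against (illustrative CPU
 * numbers): masks {0,1} and {1,2} at the same non-NUMA level partially
 * overlap, so a later get_group() pass for CPU2 would re-link groups
 * already linked for CPU0 and corrupt the circular group list. Such a
 * pair is rejected below, while {0,1} vs {0,1} (equal) and {0,1} vs
 * {2,3} (disjoint) are accepted.
 */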
topology_span_sane(struct sched_domain_topology_level * tl,const struct cpumask * cpu_map,int cpu)1876 static bool topology_span_sane(struct sched_domain_topology_level *tl,
1877 const struct cpumask *cpu_map, int cpu)
1878 {
1879 int i;
1880
1881 /* NUMA levels are allowed to overlap */
1882 if (tl->flags & SDTL_OVERLAP)
1883 return true;
1884
1885 /*
1886 * Non-NUMA levels cannot partially overlap - they must be either
1887 * completely equal or completely disjoint. Otherwise we can end up
1888 * breaking the sched_group lists - i.e. a later get_group() pass
1889 * breaks the linking done for an earlier span.
1890 */
1891 for_each_cpu(i, cpu_map) {
1892 if (i == cpu)
1893 continue;
1894 /*
1895 * We should 'and' all those masks with 'cpu_map' to exactly
1896 * match the topology we're about to build, but that can only
1897 * remove CPUs, which only lessens our ability to detect
1898 * overlaps
1899 */
1900 if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
1901 cpumask_intersects(tl->mask(cpu), tl->mask(i)))
1902 return false;
1903 }
1904
1905 return true;
1906 }

/*
 * Find the sched_domain_topology_level where all CPU capacities are visible
 * for all CPUs.
 */
static struct sched_domain_topology_level
*asym_cpu_capacity_level(const struct cpumask *cpu_map)
{
	int i, j, asym_level = 0;
	bool asym = false;
	struct sched_domain_topology_level *tl, *asym_tl = NULL;
	unsigned long cap;

	/* Is there any asymmetry? */
	cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));

	for_each_cpu(i, cpu_map) {
		if (arch_scale_cpu_capacity(i) != cap) {
			asym = true;
			break;
		}
	}

	if (!asym)
		return NULL;

	/*
	 * Examine the topology from all CPUs' points of view to detect the
	 * lowest sched_domain_topology_level where a highest-capacity CPU is
	 * visible to everyone.
	 */
	for_each_cpu(i, cpu_map) {
		unsigned long max_capacity = arch_scale_cpu_capacity(i);
		int tl_id = 0;

		for_each_sd_topology(tl) {
			if (tl_id < asym_level)
				goto next_level;

			for_each_cpu_and(j, tl->mask(i), cpu_map) {
				unsigned long capacity;

				capacity = arch_scale_cpu_capacity(j);

				if (capacity <= max_capacity)
					continue;

				max_capacity = capacity;
				asym_level = tl_id;
				asym_tl = tl;
			}
next_level:
			tl_id++;
		}
	}

	return asym_tl;
}
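/*
 * E.g. on a hypothetical big.LITTLE system with two little CPUs {0,1}
 * and two big CPUs {2,3}, the MC level only spans same-capacity CPUs,
 * so the first level whose mask covers all four CPUs (typically DIE)
 * is returned and later tagged SD_ASYM_CPUCAPACITY.
 */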


/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs.
 */
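/*
 * The result, per CPU, is a bottom-up chain of domains, e.g. (for a
 * typical SMP machine; the exact levels depend on the architecture's
 * topology table) SMT -> MC -> DIE -> NUMA, where each span is a
 * superset of its child's span.
 */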
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state = sa_none;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;
	struct sched_domain_topology_level *tl_asym;
	bool has_asym = false;

	if (WARN_ON(cpumask_empty(cpu_map)))
		goto error;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	tl_asym = asym_cpu_capacity_level(cpu_map);

	/* Set up domains for CPUs specified by the cpu_map: */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;
		int dflags = 0;

		sd = NULL;
		for_each_sd_topology(tl) {
			if (tl == tl_asym) {
				dflags |= SD_ASYM_CPUCAPACITY;
				has_asym = true;
			}

			if (WARN_ON(!topology_span_sane(tl, cpu_map, i)))
				goto error;

			sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);

			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP)
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes */
	for (i = nr_cpumask_bits - 1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);

		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	if (has_asym)
		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);

	if (rq && sched_debug_enabled) {
		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
	}
	trace_android_vh_build_sched_domains(has_asym);

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);

	return ret;
}

/* Current sched domains: */
static cpumask_var_t *doms_cur;

/* Number of sched domains in 'doms_cur': */
static int ndoms_cur;

/* Attributes of custom domains in 'doms_cur': */
static struct sched_domain_attr *dattr_cur;

/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

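/*
 * Allocate an array of 'ndoms' cpumasks for use as sched-domain
 * partitions. On any allocation failure, the masks allocated so far
 * are released via free_sched_domains() and NULL is returned.
 */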
cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc_array(ndoms, sizeof(*doms), GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

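/*
 * Counterpart of alloc_sched_domains(): free the first 'ndoms' cpumasks
 * and then the array itself.
 */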
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;

	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups. For now this just excludes isolated
 * CPUs, but could be used to exclude other special cases in the future.
 */
int sched_init_domains(const struct cpumask *cpu_map)
{
	int err;

	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of CPUs specified in cpu_map.
 * These CPUs will now be attached to the NULL domain.
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	unsigned int cpu = cpumask_any(cpu_map);
	int i;

	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* Handle NULL as "default": a NULL attr array compares equal to SD_ATTR_INIT. */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* Fast path: */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;

	return !memcmp(cur ? (cur + idx_cur) : &tmp,
		       new ? (new + idx_new) : &tmp,
		       sizeof(struct sched_domain_attr));
}


/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap). We should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed-in 'doms_new' should be allocated using
 * alloc_sched_domains(). This routine takes ownership of it and will
 * free_sched_domains() it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; this also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with the hotplug lock and sched_domains_mutex held.
 */
void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new)
{
	bool __maybe_unused has_eas = false;
	int i, j, n;
	int new_topology;

	lockdep_assert_held(&sched_domains_mutex);

	/* Always unregister in case we don't destroy any domains: */
	unregister_sched_domain_sysctl();

	/* Let the architecture update CPU core mappings: */
	new_topology = arch_update_cpu_topology();

	if (!doms_new) {
		WARN_ON_ONCE(dattr_new);
		n = 0;
		doms_new = alloc_sched_domains(1);
		if (doms_new) {
			n = 1;
			cpumask_and(doms_new[0], cpu_active_mask,
				    housekeeping_cpumask(HK_FLAG_DOMAIN));
		}
	} else {
		n = ndoms_new;
	}

	/* Destroy deleted domains: */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j]) &&
			    dattrs_equal(dattr_cur, i, dattr_new, j)) {
				struct root_domain *rd;

				/*
				 * This domain won't be destroyed and as such
				 * its dl_bw->total_bw needs to be cleared. It
				 * will be recomputed in function
				 * update_tasks_root_domain().
				 */
				rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;
				dl_clear_root_domain(rd);
				goto match1;
			}
		}
		/* No match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (!doms_new) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_and(doms_new[0], cpu_active_mask,
			    housekeeping_cpumask(HK_FLAG_DOMAIN));
	}

	/* Build new domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
			    dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* No match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
	/* Build perf. domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !sched_energy_update; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j]) &&
			    cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
				has_eas = true;
				goto match3;
			}
		}
		/* No match - add perf. domains for a new rd */
		has_eas |= build_perf_domains(doms_new[i]);
match3:
		;
	}
	sched_energy_set(has_eas);
#endif

	/* Remember the new sched domains: */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);

	kfree(dattr_cur);
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();
}

/*
 * Call with the hotplug lock held.
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	mutex_lock(&sched_domains_mutex);
	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
	mutex_unlock(&sched_domains_mutex);
}

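/*
 * Example (hypothetical caller, for illustration only): a cpuset-style
 * rebuild that splits the active CPUs into two load-balancing
 * partitions might do:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], part0);
 *		cpumask_copy(doms[1], part1);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * where 'part0'/'part1' stand for disjoint cpumasks; ownership of
 * 'doms' passes to the scheduler, which frees it on the next
 * repartition.
 */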