Lines matching "cs" in kernel/cgroup/cpuset.c
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
60 #include <linux/backing-dev.h>
90 * The user-configured masks can only be changed by writing to
104  * The user-configured masks are always the same as the effective masks.
107  /* user-configured CPUs and Memory Nodes allowed to tasks */
117 * CPUs allocated to child sub-partitions (default hierarchy only)
118 * - CPUs granted by the parent = effective_cpus U subparts_cpus
119 * - effective_cpus and subparts_cpus are mutually exclusive.
129 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
130 * - A new cpuset's old_mems_allowed is initialized when some
132 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
142 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
160 * use_parent_ecpus - set if using parent's effective_cpus
161 * child_ecpus_count - # of children with use_parent_ecpus set
178 * 0 - not a partition root
180 * 1 - partition root
182 * -1 - invalid partition root
189 #define PRS_DISABLED 0
191 #define PRS_ERROR -1
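The middle constant did not match the search; in mainline it is presumably:

	#define PRS_ENABLED		1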
213 static inline struct cpuset *parent_cs(struct cpuset *cs) in parent_cs() argument
215 return css_cs(cs->css.parent); in parent_cs()
220 struct cpuset *cs = task_cs(p); in inc_dl_tasks_cs() local
222 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
227 struct cpuset *cs = task_cs(p); in dec_dl_tasks_cs() local
229 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
245 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
247 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
250 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
252 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
255 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
257 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
260 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
262 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
265 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
267 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
270 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
272 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
275 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
277 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
280 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
282 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
285 static inline int is_partition_root(const struct cpuset *cs) in is_partition_root() argument
287 return cs->partition_root_state > 0; in is_partition_root()
297 * cpuset_for_each_child - traverse online children of a cpuset
306 css_for_each_child((pos_css), &(parent_cs)->css) \
310 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
321 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
325 * There are two global locks guarding cpuset structures - cpuset_mutex and
345 * If a task is only holding callback_lock, then it has read-only
353 * small pieces of code, such as when reading out possibly multi-word
395 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); in is_in_v2_mode()
403 * One way or another, we guarantee to return some non-empty subset
408 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) in guarantee_online_cpus() argument
410 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { in guarantee_online_cpus()
411 cs = parent_cs(cs); in guarantee_online_cpus()
412 if (unlikely(!cs)) { in guarantee_online_cpus()
424 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); in guarantee_online_cpus()
433 * One way or another, we guarantee to return some non-empty subset
438 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
440 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
441 cs = parent_cs(cs); in guarantee_online_mems()
442 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
450 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
453 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
458 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
465 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
474 return cpumask_subset(p->cpus_requested, q->cpus_requested) && in is_cpuset_subset()
475 nodes_subset(p->mems_allowed, q->mems_allowed) && in is_cpuset_subset()
481 * alloc_cpumasks - allocate three cpumasks for cpuset
482  * @cs: the cpuset whose cpumasks are to be allocated.
484 * Return: 0 if successful, -ENOMEM otherwise.
486 * Only one of the two input arguments should be non-NULL.
488 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
492 if (cs) { in alloc_cpumasks()
493 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
494 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
495 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
496 pmask4 = &cs->cpus_requested; in alloc_cpumasks()
498 pmask1 = &tmp->new_cpus; in alloc_cpumasks()
499 pmask2 = &tmp->addmask; in alloc_cpumasks()
500 pmask3 = &tmp->delmask; in alloc_cpumasks()
504 return -ENOMEM; in alloc_cpumasks()
512 if (cs && !zalloc_cpumask_var(pmask4, GFP_KERNEL)) in alloc_cpumasks()
515 return 0; in alloc_cpumasks()
523 return -ENOMEM; in alloc_cpumasks()
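Between the matched lines above, failed allocations presumably unwind through the usual goto ladder before that final return; a sketch of the elided error path (label names assumed from the upstream three-mask version):

	if (cs && !zalloc_cpumask_var(pmask4, GFP_KERNEL))
		goto free_three;
	return 0;

free_three:
	free_cpumask_var(*pmask3);
free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;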
527 * free_cpumasks - free cpumasks in a tmpmasks structure
528  * @cs: the cpuset whose cpumasks are to be freed.
531 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
533 if (cs) { in free_cpumasks()
534 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
535 free_cpumask_var(cs->cpus_requested); in free_cpumasks()
536 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
537 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
540 free_cpumask_var(tmp->new_cpus); in free_cpumasks()
541 free_cpumask_var(tmp->addmask); in free_cpumasks()
542 free_cpumask_var(tmp->delmask); in free_cpumasks()
547 * alloc_trial_cpuset - allocate a trial cpuset
548 * @cs: the cpuset that the trial cpuset duplicates
550 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
554 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
563 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
564 cpumask_copy(trial->cpus_requested, cs->cpus_requested); in alloc_trial_cpuset()
565 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
570 * free_cpuset - free the cpuset
571 * @cs: the cpuset to be freed
573 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
575 free_cpumasks(cs, NULL); in free_cpuset()
576 kfree(cs); in free_cpuset()
580 * validate_change() - Used to validate that any proposed cpuset change
588 * 'cur' is the address of an actual, in-use cpuset. Operations
596 * Return 0 if valid, -errno if not.
608 ret = -EBUSY; in validate_change()
614 ret = 0; in validate_change()
621 ret = -EACCES; in validate_change()
629 ret = -EINVAL; in validate_change()
633 cpumask_intersects(trial->cpus_requested, in validate_change()
634 c->cpus_requested)) in validate_change()
638 nodes_intersects(trial->mems_allowed, c->mems_allowed)) in validate_change()
643 * Cpusets with tasks - existing or newly being attached - can't in validate_change()
646 ret = -ENOSPC; in validate_change()
647 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { in validate_change()
648 if (!cpumask_empty(cur->cpus_allowed) && in validate_change()
649 cpumask_empty(trial->cpus_allowed)) in validate_change()
651 if (!nodes_empty(cur->mems_allowed) && in validate_change()
652 nodes_empty(trial->mems_allowed)) in validate_change()
660 ret = -EBUSY; in validate_change()
662 !cpuset_cpumask_can_shrink(cur->cpus_allowed, in validate_change()
663 trial->cpus_allowed)) in validate_change()
666 ret = 0; in validate_change()
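For orientation, every writer below uses validate_change() in the same copy-mutate-validate-commit pattern; a condensed, hypothetical caller (declarations and error handling trimmed):

	struct cpuset *trialcs = alloc_trial_cpuset(cs);
	int retval;

	if (!trialcs)
		return -ENOMEM;
	cpumask_set_cpu(3, trialcs->cpus_requested);	/* the proposed change */
	retval = validate_change(cs, trialcs);
	if (!retval) {
		spin_lock_irq(&callback_lock);
		cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
		spin_unlock_irq(&callback_lock);
	}
	free_cpuset(trialcs);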
679 return cpumask_intersects(a->effective_cpus, b->effective_cpus); in cpusets_overlap()
685 if (dattr->relax_domain_level < c->relax_domain_level) in update_domain_attr()
686 dattr->relax_domain_level = c->relax_domain_level; in update_domain_attr()
699 if (cpumask_empty(cp->cpus_allowed)) { in update_domain_attr_tree()
713 /* jump label reference count + the top-level cpuset */ in nr_cpusets()
721 * A 'partial partition' is a set of non-overlapping subsets whose
728 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
739 * cp - cpuset pointer, used (together with pos_css) to perform a
740 * top-down scan of all cpusets. For our purposes, rebuilding
743 * csa - (for CpuSet Array) Array of pointers to all the cpusets
750 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
773 struct cpuset *cp; /* top-down scan of cpusets */ in generate_sched_domains()
779 int ndoms = 0; /* number of sched domains in result */ in generate_sched_domains()
800 cpumask_and(doms[0], top_cpuset.effective_cpus, in generate_sched_domains()
809 csn = 0; in generate_sched_domains()
825 * If root is load-balancing, we can skip @cp if it in generate_sched_domains()
828 if (!cpumask_empty(cp->cpus_allowed) && in generate_sched_domains()
830 cpumask_intersects(cp->cpus_allowed, in generate_sched_domains()
835 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) in generate_sched_domains()
839 !cpumask_empty(cp->effective_cpus)) in generate_sched_domains()
848 for (i = 0; i < csn; i++) in generate_sched_domains()
849 csa[i]->pn = i; in generate_sched_domains()
854 for (i = 0; i < csn; i++) { in generate_sched_domains()
856 int apn = a->pn; in generate_sched_domains()
858 for (j = 0; j < csn; j++) { in generate_sched_domains()
860 int bpn = b->pn; in generate_sched_domains()
863 for (k = 0; k < csn; k++) { in generate_sched_domains()
866 if (c->pn == bpn) in generate_sched_domains()
867 c->pn = apn; in generate_sched_domains()
869 ndoms--; /* one less element */ in generate_sched_domains()
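The partition-number merge above is easiest to see with concrete sets. A standalone sketch (plain C, not kernel code): three candidate domains A={0,1}, B={1,2}, C={4,5}; A and B overlap, so they collapse into one sched domain and ndoms drops from 3 to 2. Re-reading pn[] on every comparison lets a single pass suffice here:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask[3] = { 0x03, 0x06, 0x30 };	/* A, B, C as CPU bitmasks */
		int pn[3] = { 0, 1, 2 };	/* each set starts as its own partition */
		int ndoms = 3;

		for (int i = 0; i < 3; i++)
			for (int j = 0; j < 3; j++)
				if (pn[i] != pn[j] && (mask[i] & mask[j])) {
					int old = pn[j];

					for (int k = 0; k < 3; k++)	/* relabel the whole class */
						if (pn[k] == old)
							pn[k] = pn[i];
					ndoms--;	/* one less element */
				}

		printf("ndoms = %d\n", ndoms);	/* prints 2: {A,B} and {C} */
		return 0;
	}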
890 for (nslot = 0, i = 0; i < csn; i++) { in generate_sched_domains()
893 int apn = a->pn; in generate_sched_domains()
895 if (apn < 0) { in generate_sched_domains()
907 warnings--; in generate_sched_domains()
918 if (apn == b->pn) { in generate_sched_domains()
919 cpumask_or(dp, dp, b->effective_cpus); in generate_sched_domains()
925 b->pn = -1; in generate_sched_domains()
947 static void dl_update_tasks_root_domain(struct cpuset *cs) in dl_update_tasks_root_domain() argument
952 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
955 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
965 struct cpuset *cs = NULL; in dl_rebuild_rd_accounting() local
980 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in dl_rebuild_rd_accounting()
982 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
987 css_get(&cs->css); in dl_rebuild_rd_accounting()
991 dl_update_tasks_root_domain(cs); in dl_rebuild_rd_accounting()
994 css_put(&cs->css); in dl_rebuild_rd_accounting()
1012 * If the flag 'sched_load_balance' of any cpuset with non-empty
1014 * which has that flag enabled, or if any cpuset with a non-empty
1025 struct cpuset *cs; in rebuild_sched_domains_locked() local
1051 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1052 if (!is_partition_root(cs)) { in rebuild_sched_domains_locked()
1056 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1087 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1088 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1090 * Iterate through each task of @cs updating its cpus_allowed to the
1094 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
1098 bool top_cs = cs == &top_cpuset; in update_tasks_cpumask()
1100 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1105 if (top_cs && (task->flags & PF_KTHREAD) && in update_tasks_cpumask()
1108 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
1114 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1116  * @cs: the cpuset that needs its effective_cpus mask recomputed
1125 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1127 if (parent->nr_subparts_cpus) { in compute_effective_cpumask()
1128 cpumask_or(new_cpus, parent->effective_cpus, in compute_effective_cpumask()
1129 parent->subparts_cpus); in compute_effective_cpumask()
1130 cpumask_and(new_cpus, new_cpus, cs->cpus_requested); in compute_effective_cpumask()
1133 cpumask_and(new_cpus, cs->cpus_requested, in compute_effective_cpumask()
1134 parent->effective_cpus); in compute_effective_cpumask()
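A quick worked example of the two branches, with hypothetical masks cpus_requested = {0-3}, parent->effective_cpus = {2-7}, parent->subparts_cpus = {0-1}:

	new_cpus = ({2-7} | {0-1}) & {0-3} = {0-3}	/* parent has sub-partitions */
	new_cpus =  {0-3} & {2-7}          = {2-3}	/* parent has none */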
1148 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1153 * Return: 0, 1 or an error code
1155 * For partcmd_enable, the cpuset is being transformed from a non-partition
1158 * effective_cpus. The function will return 0 if all the CPUs listed in
1162 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1164 * into parent's effective_cpus. 0 should always be returned.
1172 * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
1173 * Error code should only be returned when newmask is non-NULL.
1208 (!newmask && cpumask_empty(cpuset->cpus_allowed))) in update_parent_subparts_cpumask()
1209 return -EINVAL; in update_parent_subparts_cpumask()
1215 if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) in update_parent_subparts_cpumask()
1216 return -EBUSY; in update_parent_subparts_cpumask()
1224 (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) || in update_parent_subparts_cpumask()
1225 cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) in update_parent_subparts_cpumask()
1226 return -EINVAL; in update_parent_subparts_cpumask()
1232 new_prs = cpuset->partition_root_state; in update_parent_subparts_cpumask()
1234 cpumask_copy(tmp->addmask, cpuset->cpus_allowed); in update_parent_subparts_cpumask()
1237 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1238 parent->subparts_cpus); in update_parent_subparts_cpumask()
1243 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus in update_parent_subparts_cpumask()
1244 * addmask = newmask & parent->effective_cpus in update_parent_subparts_cpumask()
1245 * & ~parent->subparts_cpus in update_parent_subparts_cpumask()
1247 cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask); in update_parent_subparts_cpumask()
1248 deleting = cpumask_and(tmp->delmask, tmp->delmask, in update_parent_subparts_cpumask()
1249 parent->subparts_cpus); in update_parent_subparts_cpumask()
1251 cpumask_and(tmp->addmask, newmask, parent->effective_cpus); in update_parent_subparts_cpumask()
1252 adding = cpumask_andnot(tmp->addmask, tmp->addmask, in update_parent_subparts_cpumask()
1253 parent->subparts_cpus); in update_parent_subparts_cpumask()
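		/*
		 * Worked example (hypothetical masks): with cpus_allowed =
		 * {0-3}, newmask = {2-5}, parent->subparts_cpus = {0-1} and
		 * parent->effective_cpus = {2-7}:
		 *
		 *	delmask = {0-3} & ~{2-5} & {0-1}  = {0-1}
		 *	addmask = {2-5} &  {2-7} & ~{0-1} = {2-5}
		 *
		 * i.e. CPUs 0-1 are handed back to the parent while CPUs
		 * 2-5 are pulled into the partition.
		 */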
1258 cpumask_equal(parent->effective_cpus, tmp->addmask)) { in update_parent_subparts_cpumask()
1260 return -EINVAL; in update_parent_subparts_cpumask()
1266 if (!cpumask_and(tmp->addmask, tmp->delmask, in update_parent_subparts_cpumask()
1268 return -EINVAL; in update_parent_subparts_cpumask()
1269 cpumask_copy(tmp->addmask, parent->effective_cpus); in update_parent_subparts_cpumask()
1275 * addmask = cpus_allowed & parent->effective_cpus in update_parent_subparts_cpumask()
1278 * pre-shrunk in case there is a change in the cpu list. in update_parent_subparts_cpumask()
1281 adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1282 parent->effective_cpus); in update_parent_subparts_cpumask()
1283 part_error = cpumask_equal(tmp->addmask, in update_parent_subparts_cpumask()
1284 parent->effective_cpus); in update_parent_subparts_cpumask()
1288 int prev_prs = cpuset->partition_root_state; in update_parent_subparts_cpumask()
1294 switch (cpuset->partition_root_state) { in update_parent_subparts_cpumask()
1311  		return 0; /* Nothing needs to be done */ in update_parent_subparts_cpumask()
1318 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1319 parent->subparts_cpus); in update_parent_subparts_cpumask()
1322 if (!adding && !deleting && (new_prs == cpuset->partition_root_state)) in update_parent_subparts_cpumask()
1323 return 0; in update_parent_subparts_cpumask()
1332 cpumask_or(parent->subparts_cpus, in update_parent_subparts_cpumask()
1333 parent->subparts_cpus, tmp->addmask); in update_parent_subparts_cpumask()
1334 cpumask_andnot(parent->effective_cpus, in update_parent_subparts_cpumask()
1335 parent->effective_cpus, tmp->addmask); in update_parent_subparts_cpumask()
1338 cpumask_andnot(parent->subparts_cpus, in update_parent_subparts_cpumask()
1339 parent->subparts_cpus, tmp->delmask); in update_parent_subparts_cpumask()
1343 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); in update_parent_subparts_cpumask()
1344 cpumask_or(parent->effective_cpus, in update_parent_subparts_cpumask()
1345 parent->effective_cpus, tmp->delmask); in update_parent_subparts_cpumask()
1348 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); in update_parent_subparts_cpumask()
1350 if (cpuset->partition_root_state != new_prs) in update_parent_subparts_cpumask()
1351 cpuset->partition_root_state = new_prs; in update_parent_subparts_cpumask()
1358 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1359 * @cs: the cpuset to consider
1369 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) in update_cpumasks_hier() argument
1377 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1380 compute_effective_cpumask(tmp->new_cpus, cp, parent); in update_cpumasks_hier()
1386 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { in update_cpumasks_hier()
1387 cpumask_copy(tmp->new_cpus, parent->effective_cpus); in update_cpumasks_hier()
1388 if (!cp->use_parent_ecpus) { in update_cpumasks_hier()
1389 cp->use_parent_ecpus = true; in update_cpumasks_hier()
1390 parent->child_ecpus_count++; in update_cpumasks_hier()
1392 } else if (cp->use_parent_ecpus) { in update_cpumasks_hier()
1393 cp->use_parent_ecpus = false; in update_cpumasks_hier()
1394 WARN_ON_ONCE(!parent->child_ecpus_count); in update_cpumasks_hier()
1395 parent->child_ecpus_count--; in update_cpumasks_hier()
1402 if (!cp->partition_root_state && in update_cpumasks_hier()
1403 cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { in update_cpumasks_hier()
1410 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
1414 new_prs = cp->partition_root_state; in update_cpumasks_hier()
1415 if ((cp != cs) && new_prs) { in update_cpumasks_hier()
1416 switch (parent->partition_root_state) { in update_cpumasks_hier()
1423 WARN_ON_ONCE(cp->partition_root_state in update_cpumasks_hier()
1434 clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); in update_cpumasks_hier()
1451 if (!css_tryget_online(&cp->css)) in update_cpumasks_hier()
1457 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
1458 if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) { in update_cpumasks_hier()
1459 cp->nr_subparts_cpus = 0; in update_cpumasks_hier()
1460 cpumask_clear(cp->subparts_cpus); in update_cpumasks_hier()
1461 } else if (cp->nr_subparts_cpus) { in update_cpumasks_hier()
1467  			 * becomes empty. We clear cp->nr_subparts_cpus and in update_cpumasks_hier()
1471 cpumask_andnot(cp->effective_cpus, cp->effective_cpus, in update_cpumasks_hier()
1472 cp->subparts_cpus); in update_cpumasks_hier()
1473 if (cpumask_empty(cp->effective_cpus)) { in update_cpumasks_hier()
1474 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
1475 cpumask_clear(cp->subparts_cpus); in update_cpumasks_hier()
1476 cp->nr_subparts_cpus = 0; in update_cpumasks_hier()
1477 } else if (!cpumask_subset(cp->subparts_cpus, in update_cpumasks_hier()
1478 tmp->new_cpus)) { in update_cpumasks_hier()
1479 cpumask_andnot(cp->subparts_cpus, in update_cpumasks_hier()
1480 cp->subparts_cpus, tmp->new_cpus); in update_cpumasks_hier()
1481 cp->nr_subparts_cpus in update_cpumasks_hier()
1482 = cpumask_weight(cp->subparts_cpus); in update_cpumasks_hier()
1486 if (new_prs != cp->partition_root_state) in update_cpumasks_hier()
1487 cp->partition_root_state = new_prs; in update_cpumasks_hier()
1492 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); in update_cpumasks_hier()
1497 * On legacy hierarchy, if the effective cpumask of any non- in update_cpumasks_hier()
1502 if (!cpumask_empty(cp->cpus_allowed) && in update_cpumasks_hier()
1509 css_put(&cp->css); in update_cpumasks_hier()
1518  * update_sibling_cpumasks - Update siblings' cpumasks
1520 * @cs: Current cpuset
1523 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1541 if (sibling == cs) in update_sibling_cpumasks()
1543 if (!sibling->use_parent_ecpus) in update_sibling_cpumasks()
1545 if (!css_tryget_online(&sibling->css)) in update_sibling_cpumasks()
1551 css_put(&sibling->css); in update_sibling_cpumasks()
1557 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1558 * @cs: the cpuset to consider
1562 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1568 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ in update_cpumask()
1569 if (cs == &top_cpuset) in update_cpumask()
1570 return -EACCES; in update_cpumask()
1579 cpumask_clear(trialcs->cpus_requested); in update_cpumask()
1581 retval = cpulist_parse(buf, trialcs->cpus_requested); in update_cpumask()
1582 if (retval < 0) in update_cpumask()
1586 if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask)) in update_cpumask()
1587 return -EINVAL; in update_cpumask()
1589 cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, in update_cpumask()
1593 if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested)) in update_cpumask()
1594 return 0; in update_cpumask()
1596 retval = validate_change(cs, trialcs); in update_cpumask()
1597 if (retval < 0) in update_cpumask()
1605 tmp.addmask = trialcs->subparts_cpus; in update_cpumask()
1606 tmp.delmask = trialcs->effective_cpus; in update_cpumask()
1607 tmp.new_cpus = trialcs->cpus_allowed; in update_cpumask()
1610 if (cs->partition_root_state) { in update_cpumask()
1612 if (cpumask_empty(trialcs->cpus_allowed)) in update_cpumask()
1613 return -EINVAL; in update_cpumask()
1614 if (update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1615 trialcs->cpus_allowed, &tmp) < 0) in update_cpumask()
1616 return -EINVAL; in update_cpumask()
1620 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1621 cpumask_copy(cs->cpus_requested, trialcs->cpus_requested); in update_cpumask()
1626 if (cs->nr_subparts_cpus) { in update_cpumask()
1627 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed); in update_cpumask()
1628 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1632 update_cpumasks_hier(cs, &tmp); in update_cpumask()
1634 if (cs->partition_root_state) { in update_cpumask()
1635 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1641 if (parent->child_ecpus_count) in update_cpumask()
1642 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1644 return 0; in update_cpumask()
1668 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); in cpuset_migrate_mm_workfn()
1669 mmput(mwork->mm); in cpuset_migrate_mm_workfn()
1680 mwork->mm = mm; in cpuset_migrate_mm()
1681 mwork->from = *from; in cpuset_migrate_mm()
1682 mwork->to = *to; in cpuset_migrate_mm()
1683 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); in cpuset_migrate_mm()
1684 queue_work(cpuset_migrate_mm_wq, &mwork->work); in cpuset_migrate_mm()
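The work item populated here is presumably the small carrier struct from mainline (field names inferred from the assignments above):

	struct cpuset_migrate_mm_work {
		struct work_struct	work;
		struct mm_struct	*mm;
		nodemask_t		from;
		nodemask_t		to;
	};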
1696 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1700 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1711 write_seqcount_begin(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
1713 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); in cpuset_change_task_nodemask()
1715 tsk->mems_allowed = *newmems; in cpuset_change_task_nodemask()
1717 write_seqcount_end(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
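For context, the matching reader side: allocators sample mems_allowed under the same seqcount, so a rebind racing with them is simply retried. A minimal sketch using the kernel's read_mems_allowed_begin()/read_mems_allowed_retry() helpers:

	nodemask_t nodes;
	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		nodes = current->mems_allowed;
	} while (read_mems_allowed_retry(seq));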
1726 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1727 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1729 * Iterate through each task of @cs updating its mems_allowed to the
1733 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1739 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1741 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1745 * take while holding tasklist_lock. Forks can happen - the in update_tasks_nodemask()
1753 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1764 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1766 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1768 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1776 * cs->old_mems_allowed. in update_tasks_nodemask()
1778 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1785 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1786 * @cs: the cpuset to consider
1796 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1802 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1805 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); in update_nodemasks_hier()
1812 *new_mems = parent->effective_mems; in update_nodemasks_hier()
1815 if (nodes_equal(*new_mems, cp->effective_mems)) { in update_nodemasks_hier()
1820 if (!css_tryget_online(&cp->css)) in update_nodemasks_hier()
1825 cp->effective_mems = *new_mems; in update_nodemasks_hier()
1829 !nodes_equal(cp->mems_allowed, cp->effective_mems)); in update_nodemasks_hier()
1834 css_put(&cp->css); in update_nodemasks_hier()
1848 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1849  * lock each such task's mm->mmap_lock, scan its vma's and rebind in update_tasks_nodemask()
1852 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1859 * it's read-only in update_nodemask()
1861 if (cs == &top_cpuset) { in update_nodemask()
1862 retval = -EACCES; in update_nodemask()
1873 nodes_clear(trialcs->mems_allowed); in update_nodemask()
1875 retval = nodelist_parse(buf, trialcs->mems_allowed); in update_nodemask()
1876 if (retval < 0) in update_nodemask()
1879 if (!nodes_subset(trialcs->mems_allowed, in update_nodemask()
1881 retval = -EINVAL; in update_nodemask()
1886 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1887 retval = 0; /* Too easy - nothing to do */ in update_nodemask()
1890 retval = validate_change(cs, trialcs); in update_nodemask()
1891 if (retval < 0) in update_nodemask()
1895 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1898 /* use trialcs->mems_allowed as a temp variable */ in update_nodemask()
1899 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1915 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1918 if (val < -1 || val >= sched_domain_level_max) in update_relax_domain_level()
1919 return -EINVAL; in update_relax_domain_level()
1922 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1923 cs->relax_domain_level = val; in update_relax_domain_level()
1924 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1925 is_sched_load_balance(cs)) in update_relax_domain_level()
1929 return 0; in update_relax_domain_level()
1933 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1934  * @cs: the cpuset in which each task's spread flags need to be changed
1936 * Iterate through each task of @cs updating its spread flags. As this
1940 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1945 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
1947 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1952 * update_flag - read a 0 or a 1 in a file and update associated flag
1954 * cs: the cpuset to update
1960 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1968 trialcs = alloc_trial_cpuset(cs); in update_flag()
1970 return -ENOMEM; in update_flag()
1973 set_bit(bit, &trialcs->flags); in update_flag()
1975 clear_bit(bit, &trialcs->flags); in update_flag()
1977 err = validate_change(cs, trialcs); in update_flag()
1978 if (err < 0) in update_flag()
1981 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1984 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1985 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1988 cs->flags = trialcs->flags; in update_flag()
1991 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) in update_flag()
1995 update_tasks_flags(cs); in update_flag()
2002  * update_prstate - update partition_root_state
2003 * cs: the cpuset to update
2008 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2010 int err, old_prs = cs->partition_root_state; in update_prstate()
2011 struct cpuset *parent = parent_cs(cs); in update_prstate()
2015 return 0; in update_prstate()
2022 return -EINVAL; in update_prstate()
2025 return -ENOMEM; in update_prstate()
2027 err = -EINVAL; in update_prstate()
2034 if (cpumask_empty(cs->cpus_allowed)) in update_prstate()
2037 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
2041 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
2044 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2053 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2054 err = 0; in update_prstate()
2058 err = update_parent_subparts_cpumask(cs, partcmd_disable, in update_prstate()
2064 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2069 if (parent->child_ecpus_count) in update_prstate()
2070 update_sibling_cpumasks(parent, cs, &tmpmask); in update_prstate()
2076 cs->partition_root_state = new_prs; in update_prstate()
2085 * Frequency meter - How fast is some event occurring?
2089 * fmeter_init() - initialize a frequency meter.
2090 * fmeter_markevent() - called each time the event happens.
2091 * fmeter_getrate() - returns the recent rate of such events.
2092 * fmeter_update() - internal routine used to update fmeter.
2099 * The filter is single-pole low-pass recursive (IIR). The time unit
2100 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2104 * has a half-life of 10 seconds, meaning that if the events quit
2129 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
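The coefficient checks out against the half-life claim above, assuming the usual FM_SCALE of 1000: each one-second tick computes val = val * 933 / 1000, and 0.933^10 ≈ 0.50, so the accumulated event count halves roughly every 10 seconds.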
2137 fmp->cnt = 0; in fmeter_init()
2138 fmp->val = 0; in fmeter_init()
2139 fmp->time = 0; in fmeter_init()
2140 spin_lock_init(&fmp->lock); in fmeter_init()
2143 /* Internal meter update - process cnt events and update value */
2150 ticks = now - fmp->time; in fmeter_update()
2152 if (ticks == 0) in fmeter_update()
2156 while (ticks-- > 0) in fmeter_update()
2157 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; in fmeter_update()
2158 fmp->time = now; in fmeter_update()
2160 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; in fmeter_update()
2161 fmp->cnt = 0; in fmeter_update()
2167 spin_lock(&fmp->lock); in fmeter_markevent()
2169 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); in fmeter_markevent()
2170 spin_unlock(&fmp->lock); in fmeter_markevent()
2178 spin_lock(&fmp->lock); in fmeter_getrate()
2180 val = fmp->val; in fmeter_getrate()
2181 spin_unlock(&fmp->lock); in fmeter_getrate()
2187 static void reset_migrate_dl_data(struct cpuset *cs) in reset_migrate_dl_data() argument
2189 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
2190 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
2197 struct cpuset *cs, *oldcs; in cpuset_can_attach() local
2204 cs = css_cs(css); in cpuset_can_attach()
2209 ret = -ENOSPC; in cpuset_can_attach()
2211 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2223 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
2224 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
2228 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
2231 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
2232 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
2235 reset_migrate_dl_data(cs); in cpuset_can_attach()
2236 ret = -EINVAL; in cpuset_can_attach()
2240 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
2242 reset_migrate_dl_data(cs); in cpuset_can_attach()
2252 cs->attach_in_progress++; in cpuset_can_attach()
2253 ret = 0; in cpuset_can_attach()
2262 struct cpuset *cs; in cpuset_cancel_attach() local
2265 cs = css_cs(css); in cpuset_cancel_attach()
2268 cs->attach_in_progress--; in cpuset_cancel_attach()
2269 if (!cs->attach_in_progress) in cpuset_cancel_attach()
2272 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
2273 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
2275 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
2276 reset_migrate_dl_data(cs); in cpuset_cancel_attach()
2296 struct cpuset *cs; in cpuset_attach() local
2300 cs = css_cs(css); in cpuset_attach()
2306 if (cs == &top_cpuset) in cpuset_attach()
2309 guarantee_online_cpus(cs, cpus_attach); in cpuset_attach()
2311 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2321 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
2328 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2343 if (is_memory_migrate(cs)) in cpuset_attach()
2344 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, in cpuset_attach()
2351 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2353 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
2354 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
2355 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
2356 reset_migrate_dl_data(cs); in cpuset_attach()
2359 cs->attach_in_progress--; in cpuset_attach()
2360 if (!cs->attach_in_progress) in cpuset_attach()
2390 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2391 cpuset_filetype_t type = cft->private; in cpuset_write_u64()
2392 int retval = 0; in cpuset_write_u64()
2396 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2397 retval = -ENODEV; in cpuset_write_u64()
2403 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2406 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2409 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2412 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2415 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2421 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2424 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2427 retval = -EINVAL; in cpuset_write_u64()
2439 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2440 cpuset_filetype_t type = cft->private; in cpuset_write_s64()
2441 int retval = -ENODEV; in cpuset_write_s64()
2445 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2450 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2453 retval = -EINVAL; in cpuset_write_s64()
2468 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2470 int retval = -ENODEV; in cpuset_write_resmask()
2475 * CPU or memory hotunplug may leave @cs w/o any execution in cpuset_write_resmask()
2480 * As writes to "cpus" or "mems" may restore @cs's execution in cpuset_write_resmask()
2489 * protection is okay as we check whether @cs is online after in cpuset_write_resmask()
2493 css_get(&cs->css); in cpuset_write_resmask()
2494 kernfs_break_active_protection(of->kn); in cpuset_write_resmask()
2499 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2502 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2504 retval = -ENOMEM; in cpuset_write_resmask()
2508 switch (of_cft(of)->private) { in cpuset_write_resmask()
2510 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2513 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2516 retval = -EINVAL; in cpuset_write_resmask()
2524 kernfs_unbreak_active_protection(of->kn); in cpuset_write_resmask()
2525 css_put(&cs->css); in cpuset_write_resmask()
2540 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2541 cpuset_filetype_t type = seq_cft(sf)->private; in cpuset_common_seq_show()
2542 int ret = 0; in cpuset_common_seq_show()
2548 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested)); in cpuset_common_seq_show()
2551 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2554 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2557 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2560 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2563 ret = -EINVAL; in cpuset_common_seq_show()
2572 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2573 cpuset_filetype_t type = cft->private; in cpuset_read_u64()
2576 return is_cpu_exclusive(cs); in cpuset_read_u64()
2578 return is_mem_exclusive(cs); in cpuset_read_u64()
2580 return is_mem_hardwall(cs); in cpuset_read_u64()
2582 return is_sched_load_balance(cs); in cpuset_read_u64()
2584 return is_memory_migrate(cs); in cpuset_read_u64()
2588 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2590 return is_spread_page(cs); in cpuset_read_u64()
2592 return is_spread_slab(cs); in cpuset_read_u64()
2598 return 0; in cpuset_read_u64()
2603 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2604 cpuset_filetype_t type = cft->private; in cpuset_read_s64()
2607 return cs->relax_domain_level; in cpuset_read_s64()
2613 return 0; in cpuset_read_s64()
2618 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2620 switch (cs->partition_root_state) { in sched_partition_show()
2631 return 0; in sched_partition_show()
2637 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2639 int retval = -ENODEV; in sched_partition_write()
2651 return -EINVAL; in sched_partition_write()
2653 css_get(&cs->css); in sched_partition_write()
2656 if (!is_cpuset_online(cs)) in sched_partition_write()
2659 retval = update_prstate(cs, val); in sched_partition_write()
2663 css_put(&cs->css); in sched_partition_write()
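For reference, on the default (v2) hierarchy this handler presumably backs the cpuset.cpus.partition file: writing the string "root" ends up in update_prstate(cs, 1) and "member" in update_prstate(cs, 0), e.g. (path hypothetical):

	echo root > /sys/fs/cgroup/mygroup/cpuset.cpus.partition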
2828 * cpuset_css_alloc - allocate a cpuset css
2835 struct cpuset *cs; in cpuset_css_alloc() local
2840 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
2841 if (!cs) in cpuset_css_alloc()
2842 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
2844 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
2845 kfree(cs); in cpuset_css_alloc()
2846 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
2849 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
2850 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
2851 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
2852 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
2853 cs->relax_domain_level = -1; in cpuset_css_alloc()
2855 return &cs->css; in cpuset_css_alloc()
2860 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
2861 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
2866 return 0; in cpuset_css_online()
2871 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
2873 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
2875 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
2881 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
2882 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
2883 cs->use_parent_ecpus = true; in cpuset_css_online()
2884 parent->child_ecpus_count++; in cpuset_css_online()
2888 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) in cpuset_css_online()
2894  	 * historical reasons - the flag may be specified during mount. in cpuset_css_online()
2897 * refuse to clone the configuration - thereby refusing the task to in cpuset_css_online()
2901 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive in cpuset_css_online()
2914 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
2915 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
2916 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
2917 cpumask_copy(cs->cpus_requested, parent->cpus_requested); in cpuset_css_online()
2918 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2923 return 0; in cpuset_css_online()
2939 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2944 if (is_partition_root(cs)) in cpuset_css_offline()
2945 update_prstate(cs, 0); in cpuset_css_offline()
2948 is_sched_load_balance(cs)) in cpuset_css_offline()
2949 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2951 if (cs->use_parent_ecpus) { in cpuset_css_offline()
2952 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
2954 cs->use_parent_ecpus = false; in cpuset_css_offline()
2955 parent->child_ecpus_count--; in cpuset_css_offline()
2959 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2967 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2969 free_cpuset(cs); in cpuset_css_free()
3000 set_cpus_allowed_ptr(task, current->cpus_ptr); in cpuset_fork()
3001 task->mems_allowed = current->mems_allowed; in cpuset_fork()
3022 * cpuset_init - initialize cpusets at system boot
3042 top_cpuset.relax_domain_level = -1; in cpuset_init()
3046 return 0; in cpuset_init()
3054 * cpuset to its next-highest non-empty parent.
3056 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
3061 * Find its next-highest non-empty parent, (top cpuset in remove_tasks_in_empty_cpuset()
3064 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
3065 while (cpumask_empty(parent->cpus_allowed) || in remove_tasks_in_empty_cpuset()
3066 nodes_empty(parent->mems_allowed)) in remove_tasks_in_empty_cpuset()
3069 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
3071 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
3077 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
3084 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
3085 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
3086 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
3087 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3094 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3095 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
3096 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3097 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
3099 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3100 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3110 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
3116 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3121 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3123 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3126 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3127 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3131 update_tasks_cpumask(cs); in hotplug_update_tasks()
3133 update_tasks_nodemask(cs); in hotplug_update_tasks()
3144 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3145 * @cs: cpuset in interest
3148 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3149 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3152 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3160 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3168 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3173 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3174 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3175 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3177 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3182 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3184 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3192 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || in cpuset_hotplug_update_tasks()
3193 (parent->partition_root_state == PRS_ERROR))) { in cpuset_hotplug_update_tasks()
3194 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3196 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3197 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3199 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3208 if ((parent->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3210 update_parent_subparts_cpumask(cs, partcmd_disable, in cpuset_hotplug_update_tasks()
3213 cs->partition_root_state = PRS_ERROR; in cpuset_hotplug_update_tasks()
3225 ((cs->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3226 !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && in cpuset_hotplug_update_tasks()
3227 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) in cpuset_hotplug_update_tasks()
3231 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3232 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3235 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3238 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3245 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3253 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3306 top_cpuset.nr_subparts_cpus = 0; in cpuset_hotplug_workfn()
3332 struct cpuset *cs; in cpuset_hotplug_workfn() local
3336 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3337 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3341 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3344 css_put(&cs->css); in cpuset_hotplug_workfn()
3391 * cpuset_init_smp - initialize cpus_allowed
3409 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); in cpuset_init_smp()
3414  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3415 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3419 * attached to the specified @tsk. Guaranteed to return some non-empty
3436 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3440 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3441 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3451 task_cs(tsk)->cpus_allowed : cpu_possible_mask); in cpuset_cpus_allowed_fallback()
3455 * We own tsk->cpus_allowed, nobody can change it under us. in cpuset_cpus_allowed_fallback()
3457 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
3459 * the wrong tsk->cpus_allowed. However, both cases imply the in cpuset_cpus_allowed_fallback()
3460 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() in cpuset_cpus_allowed_fallback()
3464 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary in cpuset_cpus_allowed_fallback()
3475 nodes_setall(current->mems_allowed); in cpuset_init_current_mems_allowed()
3479  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
3480 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3483 * attached to the specified @tsk. Guaranteed to return some non-empty
3503  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3506 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3510 return nodes_intersects(*nodemask, current->mems_allowed); in cpuset_nodemask_valid_mems_allowed()
3514 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3519 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3521 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3522 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3523 return cs; in nearest_hardwall_ancestor()
3527 * cpuset_node_allowed - Can we allocate on a memory node?
3560 * in_interrupt - any node ok (current task context irrelevant)
3561 * GFP_ATOMIC - any node ok
3562 * tsk_is_oom_victim - any node ok
3563 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
3564  * GFP_USER - only nodes in current task's mems_allowed ok.
3568 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3574 if (node_isset(node, current->mems_allowed)) in __cpuset_node_allowed()
3585 if (current->flags & PF_EXITING) /* Let dying task have memory */ in __cpuset_node_allowed()
3592 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3593 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()
3601 * cpuset_mem_spread_node() - On which node to begin search for a file page
3602 * cpuset_slab_spread_node() - On which node to begin search for a slab page
3617 * only set nodes in task->mems_allowed that are online. So it
3629 return *rotor = next_node_in(*rotor, current->mems_allowed); in cpuset_spread_node()
3634 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) in cpuset_mem_spread_node()
3635 current->cpuset_mem_spread_rotor = in cpuset_mem_spread_node()
3636 node_random(¤t->mems_allowed); in cpuset_mem_spread_node()
3638 return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); in cpuset_mem_spread_node()
3643 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) in cpuset_slab_spread_node()
3644 current->cpuset_slab_spread_rotor = in cpuset_slab_spread_node()
3645 node_random(¤t->mems_allowed); in cpuset_slab_spread_node()
3647 return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); in cpuset_slab_spread_node()
3653 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3666 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); in cpuset_mems_allowed_intersects()
3670 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3681 cgrp = task_cs(current)->css.cgroup; in cpuset_print_current_mems_allowed()
3685 nodemask_pr_args(¤t->mems_allowed)); in cpuset_print_current_mems_allowed()
3699 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3710 * Display to user space in the per-cpuset read-only file
3719 fmeter_markevent(&task_cs(current)->fmeter); in __cpuset_memory_pressure_bump()
3726  * - Print task's cpuset path into seq_file.
3727 * - Used for /proc/<pid>/cpuset.
3728 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3729 * doesn't really matter if tsk->cpuset changes after we read it,
3740 retval = -ENOMEM; in proc_cpuset_show()
3746 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, in proc_cpuset_show()
3747 current->nsproxy->cgroup_ns); in proc_cpuset_show()
3750 retval = -ENAMETOOLONG; in proc_cpuset_show()
3751 if (retval < 0) in proc_cpuset_show()
3755 retval = 0; in proc_cpuset_show()
3767 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()
3769 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()