
Lines Matching +full:cs +full:-1 (in kernel/cgroup/cpuset.c)

7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
59 #include <linux/backing-dev.h>
89 * The user-configured masks can only be changed by writing to
103 * The user-configured masks are always the same as the effective masks.
106 /* user-configured CPUs and Memory Nodes allowed to tasks */
116 * CPUs allocated to child sub-partitions (default hierarchy only)
117 * - CPUs granted by the parent = effective_cpus U subparts_cpus
118 * - effective_cpus and subparts_cpus are mutually exclusive.
128 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
129 * - A new cpuset's old_mems_allowed is initialized when some
131 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
141 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
159 * use_parent_ecpus - set if using parent's effective_cpus
160 * child_ecpus_count - # of children with use_parent_ecpus set
169 * 0 - not a partition root
171 * 1 - partition root
173 * -1 - invalid partition root
181 #define PRS_ENABLED 1
182 #define PRS_ERROR -1
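
A minimal illustration of how those three states read (prs_str() is a made-up helper name, not something in the source; the state meanings are taken from the comment above):

	static const char *prs_str(int prs)
	{
		switch (prs) {
		case PRS_ENABLED:	/*  1 */
			return "partition root";
		case PRS_ERROR:		/* -1 */
			return "invalid partition root";
		default:		/*  0 */
			return "not a partition root";
		}
	}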
204 static inline struct cpuset *parent_cs(struct cpuset *cs) in parent_cs() argument
206 return css_cs(cs->css.parent); in parent_cs()
222 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
224 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
227 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
229 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
232 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
234 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
237 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
239 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
242 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
244 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
247 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
249 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
252 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
254 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
257 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
259 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
262 static inline int is_partition_root(const struct cpuset *cs) in is_partition_root() argument
264 return cs->partition_root_state > 0; in is_partition_root()
268 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
269 (1 << CS_MEM_EXCLUSIVE)),
274 * cpuset_for_each_child - traverse online children of a cpuset
283 css_for_each_child((pos_css), &(parent_cs)->css) \
287 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
298 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
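
A sketch of how these walker macros are typically used: the css_tryget_online()/css_put() pattern mirrors rebuild_root_domains() and cpuset_hotplug_workfn() later in this excerpt, and the body comment marks where caller-specific work would go.

	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset || !css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		/* ... act on cp while its css is pinned ... */

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();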
302 * There are two global locks guarding cpuset structures - cpuset_mutex and
322 * If a task is only holding callback_lock, then it has read-only
330 * small pieces of code, such as when reading out possibly multi-word
372 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); in is_in_v2_mode()
380 * One way or another, we guarantee to return some non-empty subset
385 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) in guarantee_online_cpus() argument
387 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { in guarantee_online_cpus()
388 cs = parent_cs(cs); in guarantee_online_cpus()
389 if (unlikely(!cs)) { in guarantee_online_cpus()
401 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); in guarantee_online_cpus()
410 * One way or another, we guarantee to return some non-empty subset
415 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
417 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
418 cs = parent_cs(cs); in guarantee_online_mems()
419 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
427 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
430 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
435 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
442 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
451 return cpumask_subset(p->cpus_requested, q->cpus_requested) && in is_cpuset_subset()
452 nodes_subset(p->mems_allowed, q->mems_allowed) && in is_cpuset_subset()
458 * alloc_cpumasks - allocate three cpumasks for cpuset
459 * @cs: the cpuset that has cpumasks to be allocated.
461 * Return: 0 if successful, -ENOMEM otherwise.
463 * Only one of the two input arguments should be non-NULL.
465 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
469 if (cs) { in alloc_cpumasks()
470 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
471 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
472 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
473 pmask4 = &cs->cpus_requested; in alloc_cpumasks()
475 pmask1 = &tmp->new_cpus; in alloc_cpumasks()
476 pmask2 = &tmp->addmask; in alloc_cpumasks()
477 pmask3 = &tmp->delmask; in alloc_cpumasks()
481 return -ENOMEM; in alloc_cpumasks()
489 if (cs && !zalloc_cpumask_var(pmask4, GFP_KERNEL)) in alloc_cpumasks()
500 return -ENOMEM; in alloc_cpumasks()
504 * free_cpumasks - free cpumasks in a tmpmasks structure
505 * @cs: the cpuset whose cpumasks are to be freed.
508 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
510 if (cs) { in free_cpumasks()
511 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
512 free_cpumask_var(cs->cpus_requested); in free_cpumasks()
513 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
514 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
517 free_cpumask_var(tmp->new_cpus); in free_cpumasks()
518 free_cpumask_var(tmp->addmask); in free_cpumasks()
519 free_cpumask_var(tmp->delmask); in free_cpumasks()
524 * alloc_trial_cpuset - allocate a trial cpuset
525 * @cs: the cpuset that the trial cpuset duplicates
527 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
531 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
540 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
541 cpumask_copy(trial->cpus_requested, cs->cpus_requested); in alloc_trial_cpuset()
542 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
547 * free_cpuset - free the cpuset
548 * @cs: the cpuset to be freed
550 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
552 free_cpumasks(cs, NULL); in free_cpuset()
553 kfree(cs); in free_cpuset()
557 * validate_change() - Used to validate that any proposed cpuset change
565 * 'cur' is the address of an actual, in-use cpuset. Operations
573 * Return 0 if valid, -errno if not.
585 ret = -EBUSY; in validate_change()
598 ret = -EACCES; in validate_change()
606 ret = -EINVAL; in validate_change()
610 cpumask_intersects(trial->cpus_requested, in validate_change()
611 c->cpus_requested)) in validate_change()
615 nodes_intersects(trial->mems_allowed, c->mems_allowed)) in validate_change()
620 * Cpusets with tasks - existing or newly being attached - can't in validate_change()
623 ret = -ENOSPC; in validate_change()
624 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { in validate_change()
625 if (!cpumask_empty(cur->cpus_allowed) && in validate_change()
626 cpumask_empty(trial->cpus_allowed)) in validate_change()
628 if (!nodes_empty(cur->mems_allowed) && in validate_change()
629 nodes_empty(trial->mems_allowed)) in validate_change()
637 ret = -EBUSY; in validate_change()
639 !cpuset_cpumask_can_shrink(cur->cpus_allowed, in validate_change()
640 trial->cpus_allowed)) in validate_change()
656 return cpumask_intersects(a->effective_cpus, b->effective_cpus); in cpusets_overlap()
662 if (dattr->relax_domain_level < c->relax_domain_level) in update_domain_attr()
663 dattr->relax_domain_level = c->relax_domain_level; in update_domain_attr()
676 if (cpumask_empty(cp->cpus_allowed)) { in update_domain_attr_tree()
690 /* jump label reference count + the top-level cpuset */ in nr_cpusets()
691 return static_key_count(&cpusets_enabled_key.key) + 1; in nr_cpusets()
698 * A 'partial partition' is a set of non-overlapping subsets whose
705 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
716 * cp - cpuset pointer, used (together with pos_css) to perform a
717 * top-down scan of all cpusets. For our purposes, rebuilding
720 * csa - (for CpuSet Array) Array of pointers to all the cpusets
727 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
750 struct cpuset *cp; /* top-down scan of cpusets */ in generate_sched_domains()
767 ndoms = 1; in generate_sched_domains()
802 * If root is load-balancing, we can skip @cp if it in generate_sched_domains()
805 if (!cpumask_empty(cp->cpus_allowed) && in generate_sched_domains()
807 cpumask_intersects(cp->cpus_allowed, in generate_sched_domains()
812 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) in generate_sched_domains()
816 !cpumask_empty(cp->effective_cpus)) in generate_sched_domains()
826 csa[i]->pn = i; in generate_sched_domains()
833 int apn = a->pn; in generate_sched_domains()
837 int bpn = b->pn; in generate_sched_domains()
843 if (c->pn == bpn) in generate_sched_domains()
844 c->pn = apn; in generate_sched_domains()
846 ndoms--; /* one less element */ in generate_sched_domains()
870 int apn = a->pn; in generate_sched_domains()
884 warnings--; in generate_sched_domains()
895 if (apn == b->pn) { in generate_sched_domains()
896 cpumask_or(dp, dp, b->effective_cpus); in generate_sched_domains()
902 b->pn = -1; in generate_sched_domains()
917 ndoms = 1; in generate_sched_domains()
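
The pn-merging loop above can be exercised in isolation. A userspace toy (struct toy_cs, the masks, and the bitwise overlap test are invented stand-ins for struct cpuset and cpusets_overlap()):

	#include <stdio.h>

	struct toy_cs { unsigned long ecpus; int pn; };

	int main(void)
	{
		/* Made-up effective_cpus masks: bit i set => CPU i */
		struct toy_cs csa[] = {
			{ 0x03, 0 },	/* CPUs 0-1           */
			{ 0x06, 0 },	/* CPUs 1-2, overlaps */
			{ 0x30, 0 },	/* CPUs 4-5, disjoint */
		};
		int csn = 3, ndoms = csn, i, j, k;

		for (i = 0; i < csn; i++)
			csa[i].pn = i;

	restart:
		for (i = 0; i < csn; i++) {
			int apn = csa[i].pn;

			for (j = 0; j < csn; j++) {
				int bpn = csa[j].pn;

				/* overlapping cpusets fold into one domain */
				if (apn != bpn && (csa[i].ecpus & csa[j].ecpus)) {
					for (k = 0; k < csn; k++)
						if (csa[k].pn == bpn)
							csa[k].pn = apn;
					ndoms--;	/* one less element */
					goto restart;
				}
			}
		}

		printf("ndoms = %d\n", ndoms);	/* prints 2: {0-2} and {4-5} */
		return 0;
	}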
924 static void update_tasks_root_domain(struct cpuset *cs) in update_tasks_root_domain() argument
929 css_task_iter_start(&cs->css, 0, &it); in update_tasks_root_domain()
939 struct cpuset *cs = NULL; in rebuild_root_domains() local
954 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_root_domains()
956 if (cpumask_empty(cs->effective_cpus)) { in rebuild_root_domains()
961 css_get(&cs->css); in rebuild_root_domains()
965 update_tasks_root_domain(cs); in rebuild_root_domains()
968 css_put(&cs->css); in rebuild_root_domains()
986 * If the flag 'sched_load_balance' of any cpuset with non-empty
988 * which has that flag enabled, or if any cpuset with a non-empty
999 struct cpuset *cs; in rebuild_sched_domains_locked() local
1025 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1026 if (!is_partition_root(cs)) { in rebuild_sched_domains_locked()
1030 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1061 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1062 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1064 * Iterate through each task of @cs updating its cpus_allowed to the
1068 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
1073 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1075 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
1080 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1082 * @cs: the cpuset that needs to recompute its new effective_cpus mask
1091 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1093 if (parent->nr_subparts_cpus) { in compute_effective_cpumask()
1094 cpumask_or(new_cpus, parent->effective_cpus, in compute_effective_cpumask()
1095 parent->subparts_cpus); in compute_effective_cpumask()
1096 cpumask_and(new_cpus, new_cpus, cs->cpus_requested); in compute_effective_cpumask()
1099 cpumask_and(new_cpus, cs->cpus_requested, in compute_effective_cpumask()
1100 parent->effective_cpus); in compute_effective_cpumask()
1114 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1119 * Return: 0, 1 or an error code
1121 * For partcmd_enable, the cpuset is being transformed from a non-partition
1128 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1137 * be granted by the parent. The function will return 1 if changes to
1139 * Error code should only be returned when newmask is non-NULL.
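
As a toy illustration of the addmask/delmask arithmetic spelled out in the comments below (userspace sketch; unsigned long stands in for a cpumask and the CPU numbers are invented):

	#include <stdio.h>

	int main(void)
	{
		/* Bit i set => CPU i is in the mask. */
		unsigned long cpus_allowed     = 0x0f;	/* child: CPUs 0-3          */
		unsigned long newmask          = 0x0c;	/* proposed: CPUs 2-3       */
		unsigned long parent_effective = 0x3c;	/* parent effective: 2-5    */
		unsigned long parent_subparts  = 0x03;	/* already granted: 0-1     */

		/* delmask = cpus_allowed & ~newmask & parent->subparts_cpus */
		unsigned long delmask = cpus_allowed & ~newmask & parent_subparts;

		/* addmask = newmask & parent->effective_cpus & ~parent->subparts_cpus */
		unsigned long addmask = newmask & parent_effective & ~parent_subparts;

		printf("delmask=%#lx addmask=%#lx\n", delmask, addmask);	/* 0x3, 0xc */
		return 0;
	}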
1174 (!newmask && cpumask_empty(cpuset->cpus_allowed))) in update_parent_subparts_cpumask()
1175 return -EINVAL; in update_parent_subparts_cpumask()
1181 if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) in update_parent_subparts_cpumask()
1182 return -EBUSY; in update_parent_subparts_cpumask()
1190 (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) || in update_parent_subparts_cpumask()
1191 cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) in update_parent_subparts_cpumask()
1192 return -EINVAL; in update_parent_subparts_cpumask()
1198 new_prs = cpuset->partition_root_state; in update_parent_subparts_cpumask()
1200 cpumask_copy(tmp->addmask, cpuset->cpus_allowed); in update_parent_subparts_cpumask()
1203 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1204 parent->subparts_cpus); in update_parent_subparts_cpumask()
1209 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus in update_parent_subparts_cpumask()
1210 * addmask = newmask & parent->effective_cpus in update_parent_subparts_cpumask()
1211 * & ~parent->subparts_cpus in update_parent_subparts_cpumask()
1213 cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask); in update_parent_subparts_cpumask()
1214 deleting = cpumask_and(tmp->delmask, tmp->delmask, in update_parent_subparts_cpumask()
1215 parent->subparts_cpus); in update_parent_subparts_cpumask()
1217 cpumask_and(tmp->addmask, newmask, parent->effective_cpus); in update_parent_subparts_cpumask()
1218 adding = cpumask_andnot(tmp->addmask, tmp->addmask, in update_parent_subparts_cpumask()
1219 parent->subparts_cpus); in update_parent_subparts_cpumask()
1224 cpumask_equal(parent->effective_cpus, tmp->addmask)) { in update_parent_subparts_cpumask()
1226 return -EINVAL; in update_parent_subparts_cpumask()
1232 if (!cpumask_and(tmp->addmask, tmp->delmask, in update_parent_subparts_cpumask()
1234 return -EINVAL; in update_parent_subparts_cpumask()
1235 cpumask_copy(tmp->addmask, parent->effective_cpus); in update_parent_subparts_cpumask()
1241 * addmask = cpus_allowed & parent->effective_cpus in update_parent_subparts_cpumask()
1244 * pre-shrunk in case there is a change in the cpu list. in update_parent_subparts_cpumask()
1247 adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1248 parent->effective_cpus); in update_parent_subparts_cpumask()
1249 part_error = cpumask_equal(tmp->addmask, in update_parent_subparts_cpumask()
1250 parent->effective_cpus); in update_parent_subparts_cpumask()
1254 int prev_prs = cpuset->partition_root_state; in update_parent_subparts_cpumask()
1260 switch (cpuset->partition_root_state) { in update_parent_subparts_cpumask()
1284 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, in update_parent_subparts_cpumask()
1285 parent->subparts_cpus); in update_parent_subparts_cpumask()
1288 if (!adding && !deleting && (new_prs == cpuset->partition_root_state)) in update_parent_subparts_cpumask()
1298 cpumask_or(parent->subparts_cpus, in update_parent_subparts_cpumask()
1299 parent->subparts_cpus, tmp->addmask); in update_parent_subparts_cpumask()
1300 cpumask_andnot(parent->effective_cpus, in update_parent_subparts_cpumask()
1301 parent->effective_cpus, tmp->addmask); in update_parent_subparts_cpumask()
1304 cpumask_andnot(parent->subparts_cpus, in update_parent_subparts_cpumask()
1305 parent->subparts_cpus, tmp->delmask); in update_parent_subparts_cpumask()
1309 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); in update_parent_subparts_cpumask()
1310 cpumask_or(parent->effective_cpus, in update_parent_subparts_cpumask()
1311 parent->effective_cpus, tmp->delmask); in update_parent_subparts_cpumask()
1314 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); in update_parent_subparts_cpumask()
1316 if (cpuset->partition_root_state != new_prs) in update_parent_subparts_cpumask()
1317 cpuset->partition_root_state = new_prs; in update_parent_subparts_cpumask()
1324 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1325 * @cs: the cpuset to consider
1335 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) in update_cpumasks_hier() argument
1343 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1346 compute_effective_cpumask(tmp->new_cpus, cp, parent); in update_cpumasks_hier()
1352 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { in update_cpumasks_hier()
1353 cpumask_copy(tmp->new_cpus, parent->effective_cpus); in update_cpumasks_hier()
1354 if (!cp->use_parent_ecpus) { in update_cpumasks_hier()
1355 cp->use_parent_ecpus = true; in update_cpumasks_hier()
1356 parent->child_ecpus_count++; in update_cpumasks_hier()
1358 } else if (cp->use_parent_ecpus) { in update_cpumasks_hier()
1359 cp->use_parent_ecpus = false; in update_cpumasks_hier()
1360 WARN_ON_ONCE(!parent->child_ecpus_count); in update_cpumasks_hier()
1361 parent->child_ecpus_count--; in update_cpumasks_hier()
1368 if (!cp->partition_root_state && in update_cpumasks_hier()
1369 cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { in update_cpumasks_hier()
1376 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
1380 new_prs = cp->partition_root_state; in update_cpumasks_hier()
1381 if ((cp != cs) && new_prs) { in update_cpumasks_hier()
1382 switch (parent->partition_root_state) { in update_cpumasks_hier()
1389 WARN_ON_ONCE(cp->partition_root_state in update_cpumasks_hier()
1400 clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); in update_cpumasks_hier()
1417 if (!css_tryget_online(&cp->css)) in update_cpumasks_hier()
1423 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
1424 if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) { in update_cpumasks_hier()
1425 cp->nr_subparts_cpus = 0; in update_cpumasks_hier()
1426 cpumask_clear(cp->subparts_cpus); in update_cpumasks_hier()
1427 } else if (cp->nr_subparts_cpus) { in update_cpumasks_hier()
 1433 * becomes empty, we clear cp->nr_subparts_cpus and in update_cpumasks_hier()
1437 cpumask_andnot(cp->effective_cpus, cp->effective_cpus, in update_cpumasks_hier()
1438 cp->subparts_cpus); in update_cpumasks_hier()
1439 if (cpumask_empty(cp->effective_cpus)) { in update_cpumasks_hier()
1440 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
1441 cpumask_clear(cp->subparts_cpus); in update_cpumasks_hier()
1442 cp->nr_subparts_cpus = 0; in update_cpumasks_hier()
1443 } else if (!cpumask_subset(cp->subparts_cpus, in update_cpumasks_hier()
1444 tmp->new_cpus)) { in update_cpumasks_hier()
1445 cpumask_andnot(cp->subparts_cpus, in update_cpumasks_hier()
1446 cp->subparts_cpus, tmp->new_cpus); in update_cpumasks_hier()
1447 cp->nr_subparts_cpus in update_cpumasks_hier()
1448 = cpumask_weight(cp->subparts_cpus); in update_cpumasks_hier()
1452 if (new_prs != cp->partition_root_state) in update_cpumasks_hier()
1453 cp->partition_root_state = new_prs; in update_cpumasks_hier()
1458 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); in update_cpumasks_hier()
1463 * On legacy hierarchy, if the effective cpumask of any non- in update_cpumasks_hier()
1468 if (!cpumask_empty(cp->cpus_allowed) && in update_cpumasks_hier()
1475 css_put(&cp->css); in update_cpumasks_hier()
 1484 * update_sibling_cpumasks - Update siblings' cpumasks
1486 * @cs: Current cpuset
1489 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1502 if (sibling == cs) in update_sibling_cpumasks()
1504 if (!sibling->use_parent_ecpus) in update_sibling_cpumasks()
1513 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1514 * @cs: the cpuset to consider
1518 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1524 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ in update_cpumask()
1525 if (cs == &top_cpuset) in update_cpumask()
1526 return -EACCES; in update_cpumask()
1535 cpumask_clear(trialcs->cpus_requested); in update_cpumask()
1537 retval = cpulist_parse(buf, trialcs->cpus_requested); in update_cpumask()
1542 if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask)) in update_cpumask()
1543 return -EINVAL; in update_cpumask()
1545 cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, in update_cpumask()
1549 if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested)) in update_cpumask()
1552 retval = validate_change(cs, trialcs); in update_cpumask()
1561 tmp.addmask = trialcs->subparts_cpus; in update_cpumask()
1562 tmp.delmask = trialcs->effective_cpus; in update_cpumask()
1563 tmp.new_cpus = trialcs->cpus_allowed; in update_cpumask()
1566 if (cs->partition_root_state) { in update_cpumask()
1568 if (cpumask_empty(trialcs->cpus_allowed)) in update_cpumask()
1569 return -EINVAL; in update_cpumask()
1570 if (update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1571 trialcs->cpus_allowed, &tmp) < 0) in update_cpumask()
1572 return -EINVAL; in update_cpumask()
1576 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1577 cpumask_copy(cs->cpus_requested, trialcs->cpus_requested); in update_cpumask()
1582 if (cs->nr_subparts_cpus) { in update_cpumask()
1583 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed); in update_cpumask()
1584 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1588 update_cpumasks_hier(cs, &tmp); in update_cpumask()
1590 if (cs->partition_root_state) { in update_cpumask()
1591 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1597 if (parent->child_ecpus_count) in update_cpumask()
1598 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1624 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); in cpuset_migrate_mm_workfn()
1625 mmput(mwork->mm); in cpuset_migrate_mm_workfn()
1636 mwork->mm = mm; in cpuset_migrate_mm()
1637 mwork->from = *from; in cpuset_migrate_mm()
1638 mwork->to = *to; in cpuset_migrate_mm()
1639 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); in cpuset_migrate_mm()
1640 queue_work(cpuset_migrate_mm_wq, &mwork->work); in cpuset_migrate_mm()
1652 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1656 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1667 write_seqcount_begin(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
1669 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); in cpuset_change_task_nodemask()
1671 tsk->mems_allowed = *newmems; in cpuset_change_task_nodemask()
1673 write_seqcount_end(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
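
The reader side pairs with this seqcount. A sketch using the kernel's read_mems_allowed_begin()/read_mems_allowed_retry() helpers from include/linux/cpuset.h (the snapshot variable is mine):

	nodemask_t snapshot;
	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		snapshot = current->mems_allowed;
	} while (read_mems_allowed_retry(seq));
	/* snapshot is now an internally consistent copy */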
1682 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1683 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1685 * Iterate through each task of @cs updating its mems_allowed to the
1689 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1695 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1697 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1701 * take while holding tasklist_lock. Forks can happen - the in update_tasks_nodemask()
1709 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1720 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1722 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1724 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1732 * cs->old_mems_allowed. in update_tasks_nodemask()
1734 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1741 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1742 * @cs: the cpuset to consider
1752 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1758 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1761 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); in update_nodemasks_hier()
1768 *new_mems = parent->effective_mems; in update_nodemasks_hier()
1771 if (nodes_equal(*new_mems, cp->effective_mems)) { in update_nodemasks_hier()
1776 if (!css_tryget_online(&cp->css)) in update_nodemasks_hier()
1781 cp->effective_mems = *new_mems; in update_nodemasks_hier()
1785 !nodes_equal(cp->mems_allowed, cp->effective_mems)); in update_nodemasks_hier()
1790 css_put(&cp->css); in update_nodemasks_hier()
1804 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 1805 * lock each such task's mm->mmap_lock, scan its vma's and rebind in update_nodemask()
1808 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1815 * it's read-only in update_nodemask()
1817 if (cs == &top_cpuset) { in update_nodemask()
1818 retval = -EACCES; in update_nodemask()
1829 nodes_clear(trialcs->mems_allowed); in update_nodemask()
1831 retval = nodelist_parse(buf, trialcs->mems_allowed); in update_nodemask()
1835 if (!nodes_subset(trialcs->mems_allowed, in update_nodemask()
1837 retval = -EINVAL; in update_nodemask()
1842 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1843 retval = 0; /* Too easy - nothing to do */ in update_nodemask()
1846 retval = validate_change(cs, trialcs); in update_nodemask()
1851 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1854 /* use trialcs->mems_allowed as a temp variable */ in update_nodemask()
1855 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1871 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1874 if (val < -1 || val >= sched_domain_level_max) in update_relax_domain_level()
1875 return -EINVAL; in update_relax_domain_level()
1878 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1879 cs->relax_domain_level = val; in update_relax_domain_level()
1880 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1881 is_sched_load_balance(cs)) in update_relax_domain_level()
1889 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1890 * @cs: the cpuset in which each task's spread flags needs to be changed
1892 * Iterate through each task of @cs updating its spread flags. As this
1896 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1901 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
1903 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1908 * update_flag - read a 0 or a 1 in a file and update associated flag
1910 * cs: the cpuset to update
1916 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1924 trialcs = alloc_trial_cpuset(cs); in update_flag()
1926 return -ENOMEM; in update_flag()
1929 set_bit(bit, &trialcs->flags); in update_flag()
1931 clear_bit(bit, &trialcs->flags); in update_flag()
1933 err = validate_change(cs, trialcs); in update_flag()
1937 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1940 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1941 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1944 cs->flags = trialcs->flags; in update_flag()
1947 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) in update_flag()
1951 update_tasks_flags(cs); in update_flag()
 1958 * update_prstate - update partition_root_state
1959 * cs: the cpuset to update
1964 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
1966 int err, old_prs = cs->partition_root_state; in update_prstate()
1967 struct cpuset *parent = parent_cs(cs); in update_prstate()
1978 return -EINVAL; in update_prstate()
1981 return -ENOMEM; in update_prstate()
1983 err = -EINVAL; in update_prstate()
1990 if (cpumask_empty(cs->cpus_allowed)) in update_prstate()
1993 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
1997 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
2000 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2009 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2014 err = update_parent_subparts_cpumask(cs, partcmd_disable, in update_prstate()
2020 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2030 if (parent->child_ecpus_count) in update_prstate()
2031 update_sibling_cpumasks(parent, cs, &tmpmask); in update_prstate()
2037 cs->partition_root_state = new_prs; in update_prstate()
2046 * Frequency meter - How fast is some event occurring?
2050 * fmeter_init() - initialize a frequency meter.
2051 * fmeter_markevent() - called each time the event happens.
2052 * fmeter_getrate() - returns the recent rate of such events.
2053 * fmeter_update() - internal routine used to update fmeter.
2060 * The filter is single-pole low-pass recursive (IIR). The time unit
2061 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2064 * With an FM_COEF of 933, and a time base of 1 second, the filter
2065 * has a half-life of 10 seconds, meaning that if the events quit
2080 * per msec it maxes out at values just under 1,000,000. At constant
2090 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
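
To see the documented 10-second half-life, iterate the decay step from fmeter_update() below in userspace (FM_SCALE = 1000 is an assumption; only FM_COEF appears in this excerpt):

	#include <stdio.h>

	#define FM_COEF  933
	#define FM_SCALE 1000	/* assumed; not shown in the matched lines */

	int main(void)
	{
		int val = 1000000;	/* hypothetical starting rate */

		/* One decay step per elapsed second, as fmeter_update() does. */
		for (int sec = 1; sec <= 10; sec++)
			val = (FM_COEF * val) / FM_SCALE;

		printf("val after 10s: %d\n", val);	/* 499820, i.e. ~half */
		return 0;
	}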
2098 fmp->cnt = 0; in fmeter_init()
2099 fmp->val = 0; in fmeter_init()
2100 fmp->time = 0; in fmeter_init()
2101 spin_lock_init(&fmp->lock); in fmeter_init()
2104 /* Internal meter update - process cnt events and update value */
2111 ticks = now - fmp->time; in fmeter_update()
2117 while (ticks-- > 0) in fmeter_update()
2118 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; in fmeter_update()
2119 fmp->time = now; in fmeter_update()
2121 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; in fmeter_update()
2122 fmp->cnt = 0; in fmeter_update()
2128 spin_lock(&fmp->lock); in fmeter_markevent()
2130 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); in fmeter_markevent()
2131 spin_unlock(&fmp->lock); in fmeter_markevent()
2139 spin_lock(&fmp->lock); in fmeter_getrate()
2141 val = fmp->val; in fmeter_getrate()
2142 spin_unlock(&fmp->lock); in fmeter_getrate()
2152 struct cpuset *cs; in cpuset_can_attach() local
2158 cs = css_cs(css); in cpuset_can_attach()
2163 ret = -ENOSPC; in cpuset_can_attach()
2165 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2169 ret = task_can_attach(task, cs->cpus_allowed); in cpuset_can_attach()
2181 cs->attach_in_progress++; in cpuset_can_attach()
2195 css_cs(css)->attach_in_progress--; in cpuset_cancel_attach()
2213 struct cpuset *cs; in cpuset_attach() local
2217 cs = css_cs(css); in cpuset_attach()
2222 if (cs == &top_cpuset) in cpuset_attach()
2225 guarantee_online_cpus(cs, cpus_attach); in cpuset_attach()
2227 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2237 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
2244 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2259 if (is_memory_migrate(cs)) in cpuset_attach()
2260 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, in cpuset_attach()
2267 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2269 cs->attach_in_progress--; in cpuset_attach()
2270 if (!cs->attach_in_progress) in cpuset_attach()
2300 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2301 cpuset_filetype_t type = cft->private; in cpuset_write_u64()
2306 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2307 retval = -ENODEV; in cpuset_write_u64()
2313 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2316 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2319 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2322 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2325 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2331 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2334 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2337 retval = -EINVAL; in cpuset_write_u64()
2349 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2350 cpuset_filetype_t type = cft->private; in cpuset_write_s64()
2351 int retval = -ENODEV; in cpuset_write_s64()
2355 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2360 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2363 retval = -EINVAL; in cpuset_write_s64()
2378 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2380 int retval = -ENODEV; in cpuset_write_resmask()
2385 * CPU or memory hotunplug may leave @cs w/o any execution in cpuset_write_resmask()
2390 * As writes to "cpus" or "mems" may restore @cs's execution in cpuset_write_resmask()
2399 * protection is okay as we check whether @cs is online after in cpuset_write_resmask()
2403 css_get(&cs->css); in cpuset_write_resmask()
2404 kernfs_break_active_protection(of->kn); in cpuset_write_resmask()
2409 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2412 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2414 retval = -ENOMEM; in cpuset_write_resmask()
2418 switch (of_cft(of)->private) { in cpuset_write_resmask()
2420 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2423 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2426 retval = -EINVAL; in cpuset_write_resmask()
2434 kernfs_unbreak_active_protection(of->kn); in cpuset_write_resmask()
2435 css_put(&cs->css); in cpuset_write_resmask()
2450 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2451 cpuset_filetype_t type = seq_cft(sf)->private; in cpuset_common_seq_show()
2458 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested)); in cpuset_common_seq_show()
2461 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2464 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2467 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2470 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2473 ret = -EINVAL; in cpuset_common_seq_show()
2482 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2483 cpuset_filetype_t type = cft->private; in cpuset_read_u64()
2486 return is_cpu_exclusive(cs); in cpuset_read_u64()
2488 return is_mem_exclusive(cs); in cpuset_read_u64()
2490 return is_mem_hardwall(cs); in cpuset_read_u64()
2492 return is_sched_load_balance(cs); in cpuset_read_u64()
2494 return is_memory_migrate(cs); in cpuset_read_u64()
2498 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2500 return is_spread_page(cs); in cpuset_read_u64()
2502 return is_spread_slab(cs); in cpuset_read_u64()
2513 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2514 cpuset_filetype_t type = cft->private; in cpuset_read_s64()
2517 return cs->relax_domain_level; in cpuset_read_s64()
2528 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2530 switch (cs->partition_root_state) { in sched_partition_show()
2547 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2549 int retval = -ENODEV; in sched_partition_write()
2561 return -EINVAL; in sched_partition_write()
2563 css_get(&cs->css); in sched_partition_write()
2566 if (!is_cpuset_online(cs)) in sched_partition_write()
2569 retval = update_prstate(cs, val); in sched_partition_write()
2573 css_put(&cs->css); in sched_partition_write()
2738 * cpuset_css_alloc - allocate a cpuset css
2745 struct cpuset *cs; in cpuset_css_alloc() local
2750 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
2751 if (!cs) in cpuset_css_alloc()
2752 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
2754 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
2755 kfree(cs); in cpuset_css_alloc()
2756 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
2759 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
2760 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
2761 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
2762 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
2763 cs->relax_domain_level = -1; in cpuset_css_alloc()
2765 return &cs->css; in cpuset_css_alloc()
2770 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
2771 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
2781 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
2783 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
2785 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
2791 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
2792 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
2793 cs->use_parent_ecpus = true; in cpuset_css_online()
2794 parent->child_ecpus_count++; in cpuset_css_online()
2798 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) in cpuset_css_online()
 2804 * historical reasons - the flag may be specified during mount. in cpuset_css_online()
2807 * refuse to clone the configuration - thereby refusing the task to in cpuset_css_online()
2811 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive in cpuset_css_online()
2824 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
2825 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
2826 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
2827 cpumask_copy(cs->cpus_requested, parent->cpus_requested); in cpuset_css_online()
2828 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2849 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2854 if (is_partition_root(cs)) in cpuset_css_offline()
2855 update_prstate(cs, 0); in cpuset_css_offline()
2858 is_sched_load_balance(cs)) in cpuset_css_offline()
2859 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2861 if (cs->use_parent_ecpus) { in cpuset_css_offline()
2862 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
2864 cs->use_parent_ecpus = false; in cpuset_css_offline()
2865 parent->child_ecpus_count--; in cpuset_css_offline()
2869 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2877 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2879 free_cpuset(cs); in cpuset_css_free()
2910 set_cpus_allowed_ptr(task, current->cpus_ptr); in cpuset_fork()
2911 task->mems_allowed = current->mems_allowed; in cpuset_fork()
2932 * cpuset_init - initialize cpusets at system boot
2954 top_cpuset.relax_domain_level = -1; in cpuset_init()
2966 * cpuset to its next-highest non-empty parent.
2968 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
2973 * Find its next-highest non-empty parent, (top cpuset in remove_tasks_in_empty_cpuset()
2976 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
2977 while (cpumask_empty(parent->cpus_allowed) || in remove_tasks_in_empty_cpuset()
2978 nodes_empty(parent->mems_allowed)) in remove_tasks_in_empty_cpuset()
2981 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
2983 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
2989 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
2996 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
2997 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
2998 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
2999 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3006 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3007 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
3008 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3009 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
3011 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3012 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3022 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
3028 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3033 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3035 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3038 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3039 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3043 update_tasks_cpumask(cs); in hotplug_update_tasks()
3045 update_tasks_nodemask(cs); in hotplug_update_tasks()
3056 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3057 * @cs: cpuset in interest
3060 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3061 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3064 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3072 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3080 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3085 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3086 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3087 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3089 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3094 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3096 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3104 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || in cpuset_hotplug_update_tasks()
3105 (parent->partition_root_state == PRS_ERROR))) { in cpuset_hotplug_update_tasks()
3106 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3108 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3109 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3111 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3120 if ((parent->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3122 update_parent_subparts_cpumask(cs, partcmd_disable, in cpuset_hotplug_update_tasks()
3125 cs->partition_root_state = PRS_ERROR; in cpuset_hotplug_update_tasks()
3137 ((cs->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3138 !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && in cpuset_hotplug_update_tasks()
3139 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) in cpuset_hotplug_update_tasks()
3143 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3144 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3147 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3150 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3157 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3165 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3244 struct cpuset *cs; in cpuset_hotplug_workfn() local
3248 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3249 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3253 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3256 css_put(&cs->css); in cpuset_hotplug_workfn()
3303 * cpuset_init_smp - initialize cpus_allowed
3323 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
3324 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3328 * attached to the specified @tsk. Guaranteed to return some non-empty
3345 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3349 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3350 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3360 task_cs(tsk)->cpus_allowed : cpu_possible_mask); in cpuset_cpus_allowed_fallback()
3364 * We own tsk->cpus_allowed, nobody can change it under us. in cpuset_cpus_allowed_fallback()
3366 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
3368 * the wrong tsk->cpus_allowed. However, both cases imply the in cpuset_cpus_allowed_fallback()
3369 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() in cpuset_cpus_allowed_fallback()
3373 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary in cpuset_cpus_allowed_fallback()
3384 nodes_setall(current->mems_allowed); in cpuset_init_current_mems_allowed()
3388 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
3389 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3392 * attached to the specified @tsk. Guaranteed to return some non-empty
 3412 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3415 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3419 return nodes_intersects(*nodemask, current->mems_allowed); in cpuset_nodemask_valid_mems_allowed()
3423 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3428 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3430 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3431 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3432 return cs; in nearest_hardwall_ancestor()
3436 * cpuset_node_allowed - Can we allocate on a memory node?
3469 * in_interrupt - any node ok (current task context irrelevant)
3470 * GFP_ATOMIC - any node ok
3471 * tsk_is_oom_victim - any node ok
3472 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
 3473 * GFP_USER - only nodes in current task's mems_allowed ok (see the sketch below).
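
A toy restatement of that table (toy_node_allowed() and the enum are mine; the real __cpuset_node_allowed() below also handles locking and the PF_EXITING case):

	enum alloc_ctx { CTX_INTERRUPT, CTX_ATOMIC, CTX_OOM_VICTIM, CTX_KERNEL, CTX_USER };

	/* in_cur_mems: node is in current->mems_allowed;
	 * in_hw_mems:  node is in the nearest hardwalled ancestor's mems_allowed. */
	static int toy_node_allowed(enum alloc_ctx ctx, int in_cur_mems, int in_hw_mems)
	{
		switch (ctx) {
		case CTX_INTERRUPT:
		case CTX_ATOMIC:
		case CTX_OOM_VICTIM:
			return 1;		/* any node ok */
		case CTX_KERNEL:
			return in_hw_mems;	/* enclosing hardwalled cpuset */
		case CTX_USER:
		default:
			return in_cur_mems;	/* current task's mems only */
		}
	}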
3477 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3483 if (node_isset(node, current->mems_allowed)) in __cpuset_node_allowed()
3494 if (current->flags & PF_EXITING) /* Let dying task have memory */ in __cpuset_node_allowed()
3501 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3502 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()
3510 * cpuset_mem_spread_node() - On which node to begin search for a file page
3511 * cpuset_slab_spread_node() - On which node to begin search for a slab page
3526 * only set nodes in task->mems_allowed that are online. So it
3538 return *rotor = next_node_in(*rotor, current->mems_allowed); in cpuset_spread_node()
3543 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) in cpuset_mem_spread_node()
3544 current->cpuset_mem_spread_rotor = in cpuset_mem_spread_node()
3545 node_random(&current->mems_allowed); in cpuset_mem_spread_node()
3547 return cpuset_spread_node(&current->cpuset_mem_spread_rotor); in cpuset_mem_spread_node()
3552 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) in cpuset_slab_spread_node()
3553 current->cpuset_slab_spread_rotor = in cpuset_slab_spread_node()
3554 node_random(&current->mems_allowed); in cpuset_slab_spread_node()
3556 return cpuset_spread_node(&current->cpuset_slab_spread_rotor); in cpuset_slab_spread_node()
3562 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3575 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); in cpuset_mems_allowed_intersects()
3579 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3590 cgrp = task_cs(current)->css.cgroup; in cpuset_print_current_mems_allowed()
3594 nodemask_pr_args(&current->mems_allowed)); in cpuset_print_current_mems_allowed()
3601 * this flag is enabled by writing "1" to the special
3608 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3619 * Display to user space in the per-cpuset read-only file
3628 fmeter_markevent(&task_cs(current)->fmeter); in __cpuset_memory_pressure_bump()
3635 * - Print tasks cpuset path into seq_file.
3636 * - Used for /proc/<pid>/cpuset.
3637 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3638 * doesn't really matter if tsk->cpuset changes after we read it,
3649 retval = -ENOMEM; in proc_cpuset_show()
3655 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, in proc_cpuset_show()
3656 current->nsproxy->cgroup_ns); in proc_cpuset_show()
3659 retval = -ENAMETOOLONG; in proc_cpuset_show()
3676 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()
3678 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()