Lines Matching full:cs

214 static inline struct cpuset *parent_cs(struct cpuset *cs)  in parent_cs()  argument
216 return css_cs(cs->css.parent); in parent_cs()
221 struct cpuset *cs = task_cs(p); in inc_dl_tasks_cs() local
223 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
228 struct cpuset *cs = task_cs(p); in dec_dl_tasks_cs() local
230 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
246 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
248 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
251 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
253 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
256 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
258 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
261 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
263 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
266 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
268 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
271 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
273 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
276 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
278 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
281 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
283 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
286 static inline int is_partition_root(const struct cpuset *cs) in is_partition_root() argument
288 return cs->partition_root_state > 0; in is_partition_root()
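
Most of the is_*() accessors above follow one pattern: each cpuset keeps an unsigned long flags word, and every per-feature predicate is a thin test_bit() wrapper over it (is_partition_root() is the exception, checking partition_root_state instead). The sketch below mirrors that pattern in plain user-space C; the demo_* names, bit values, and struct layout are illustrative stand-ins, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits; the real CS_* values live in the kernel source. */
enum demo_flagbits {
	DEMO_CPU_EXCLUSIVE,
	DEMO_MEM_EXCLUSIVE,
	DEMO_SCHED_LOAD_BALANCE,
};

struct demo_cpuset {
	unsigned long flags;	/* bitmap of demo_flagbits */
};

/* User-space stand-ins for the kernel's test_bit()/set_bit() helpers. */
static inline bool demo_test_bit(int bit, const unsigned long *word)
{
	return (*word >> bit) & 1UL;
}

static inline void demo_set_bit(int bit, unsigned long *word)
{
	*word |= 1UL << bit;
}

static inline bool is_cpu_exclusive(const struct demo_cpuset *cs)
{
	return demo_test_bit(DEMO_CPU_EXCLUSIVE, &cs->flags);
}

int main(void)
{
	struct demo_cpuset cs = { .flags = 0 };

	demo_set_bit(DEMO_CPU_EXCLUSIVE, &cs.flags);
	printf("cpu_exclusive: %d\n", is_cpu_exclusive(&cs));
	return 0;
}
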
409 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) in guarantee_online_cpus() argument
411 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { in guarantee_online_cpus()
412 cs = parent_cs(cs); in guarantee_online_cpus()
413 if (unlikely(!cs)) { in guarantee_online_cpus()
425 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); in guarantee_online_cpus()
439 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
441 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
442 cs = parent_cs(cs); in guarantee_online_mems()
443 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
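
guarantee_online_cpus() and guarantee_online_mems() above share one idea: if a cpuset's effective mask no longer intersects the online set (for example after hotplug), climb the parent chain until an ancestor does intersect it, then return the intersection. A rough user-space sketch of that walk, with a single bitmask word and a hypothetical parent pointer standing in for cpumask_t and the cpuset hierarchy:

#include <stdio.h>

/* Hypothetical, simplified stand-in for struct cpuset: one mask word each. */
struct demo_node {
	struct demo_node *parent;	/* NULL at the root */
	unsigned long effective_cpus;	/* bit i set => CPU i allowed */
};

/*
 * Climb toward the root until effective_cpus intersects the online mask,
 * then return the intersection. The root is assumed to always intersect.
 */
static unsigned long demo_guarantee_online(const struct demo_node *n,
					   unsigned long online)
{
	while (n->parent && !(n->effective_cpus & online))
		n = n->parent;
	return n->effective_cpus & online;
}

int main(void)
{
	struct demo_node root  = { .parent = NULL,  .effective_cpus = 0x0f };
	struct demo_node child = { .parent = &root, .effective_cpus = 0x08 };
	unsigned long online = 0x07;	/* CPU 3 went offline */

	printf("fallback mask: 0x%lx\n", demo_guarantee_online(&child, online));
	return 0;
}
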
451 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
454 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
459 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
483 * @cs: the cpuset that has cpumasks to be allocated.
489 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
493 if (cs) { in alloc_cpumasks()
494 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
495 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
496 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
497 pmask4 = &cs->cpus_requested; in alloc_cpumasks()
513 if (cs && !zalloc_cpumask_var(pmask4, GFP_KERNEL)) in alloc_cpumasks()
529 * @cs: the cpuset that has cpumasks to be freed.
532 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
534 if (cs) { in free_cpumasks()
535 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
536 free_cpumask_var(cs->cpus_requested); in free_cpumasks()
537 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
538 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
549 * @cs: the cpuset that the trial cpuset duplicates
551 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
555 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
564 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
565 cpumask_copy(trial->cpus_requested, cs->cpus_requested); in alloc_trial_cpuset()
566 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
572 * @cs: the cpuset to be freed
574 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
576 free_cpumasks(cs, NULL); in free_cpuset()
577 kfree(cs); in free_cpuset()
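
alloc_trial_cpuset() and free_cpuset() above support the trial-copy discipline used by the update paths: a change is first applied to a duplicated cpuset, checked with validate_change(), and only then copied back into the live object. A hypothetical miniature of that prepare/validate/commit flow, with a one-word mask in place of the real cpumasks:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature of the trial-copy pattern used by the update paths. */
struct demo_cs {
	unsigned long cpus;	/* requested CPU mask, one word for brevity */
};

static bool demo_validate_change(const struct demo_cs *cur,
				 const struct demo_cs *trial)
{
	(void)cur;
	return trial->cpus != 0;	/* stand-in rule: never allow an empty mask */
}

static int demo_update_cpus(struct demo_cs *cs, unsigned long new_cpus)
{
	struct demo_cs *trial = malloc(sizeof(*trial));
	int ret = 0;

	if (!trial)
		return -1;
	memcpy(trial, cs, sizeof(*trial));	/* like alloc_trial_cpuset() */
	trial->cpus = new_cpus;			/* mutate only the copy */

	if (demo_validate_change(cs, trial))
		cs->cpus = trial->cpus;		/* commit into the live object */
	else
		ret = -1;			/* discard; cs stays untouched */

	free(trial);				/* like free_cpuset() */
	return ret;
}

int main(void)
{
	struct demo_cs cs = { .cpus = 0x3 };
	int ret;

	ret = demo_update_cpus(&cs, 0x0);	/* rejected: empty mask */
	printf("reject: ret=%d cpus=0x%lx\n", ret, cs.cpus);

	ret = demo_update_cpus(&cs, 0xc);	/* accepted and committed */
	printf("accept: ret=%d cpus=0x%lx\n", ret, cs.cpus);
	return 0;
}
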
948 static void dl_update_tasks_root_domain(struct cpuset *cs) in dl_update_tasks_root_domain() argument
953 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
956 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
966 struct cpuset *cs = NULL; in dl_rebuild_rd_accounting() local
981 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in dl_rebuild_rd_accounting()
983 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
988 css_get(&cs->css); in dl_rebuild_rd_accounting()
992 dl_update_tasks_root_domain(cs); in dl_rebuild_rd_accounting()
995 css_put(&cs->css); in dl_rebuild_rd_accounting()
1026 struct cpuset *cs; in rebuild_sched_domains_locked() local
1052 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1053 if (!is_partition_root(cs)) { in rebuild_sched_domains_locked()
1057 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1089 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1091 * Iterate through each task of @cs updating its cpus_allowed to the
1095 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
1099 bool top_cs = cs == &top_cpuset; in update_tasks_cpumask()
1101 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1109 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
1117 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1126 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1131 cpumask_and(new_cpus, new_cpus, cs->cpus_requested); in compute_effective_cpumask()
1134 cpumask_and(new_cpus, cs->cpus_requested, in compute_effective_cpumask()
1360 * @cs: the cpuset to consider
1370 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) in update_cpumasks_hier() argument
1378 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1411 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
1416 if ((cp != cs) && new_prs) { in update_cpumasks_hier()
1521 * @cs: Current cpuset
1524 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1542 if (sibling == cs) in update_sibling_cpumasks()
1559 * @cs: the cpuset to consider
1563 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1570 if (cs == &top_cpuset) in update_cpumask()
1594 if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested)) in update_cpumask()
1597 retval = validate_change(cs, trialcs); in update_cpumask()
1611 if (cs->partition_root_state) { in update_cpumask()
1615 if (update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1621 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1622 cpumask_copy(cs->cpus_requested, trialcs->cpus_requested); in update_cpumask()
1627 if (cs->nr_subparts_cpus) { in update_cpumask()
1628 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed); in update_cpumask()
1629 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1633 update_cpumasks_hier(cs, &tmp); in update_cpumask()
1635 if (cs->partition_root_state) { in update_cpumask()
1636 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1643 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1728 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1730 * Iterate through each task of @cs updating its mems_allowed to the
1734 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1740 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1742 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1754 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1765 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1767 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1769 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1777 * cs->old_mems_allowed. in update_tasks_nodemask()
1779 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1787 * @cs: the cpuset to consider
1797 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1803 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1849 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1853 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1862 if (cs == &top_cpuset) { in update_nodemask()
1887 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1891 retval = validate_change(cs, trialcs); in update_nodemask()
1896 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1900 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1916 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1923 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1924 cs->relax_domain_level = val; in update_relax_domain_level()
1925 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1926 is_sched_load_balance(cs)) in update_relax_domain_level()
1935 * @cs: the cpuset in which each task's spread flags need to be changed
1937 * Iterate through each task of @cs updating its spread flags. As this
1941 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1946 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
1948 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1955 * cs: the cpuset to update
1961 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1969 trialcs = alloc_trial_cpuset(cs); in update_flag()
1978 err = validate_change(cs, trialcs); in update_flag()
1982 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1985 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1986 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1989 cs->flags = trialcs->flags; in update_flag()
1996 update_tasks_flags(cs); in update_flag()
2004 * cs: the cpuset to update
2009 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2011 int err, old_prs = cs->partition_root_state; in update_prstate()
2012 struct cpuset *parent = parent_cs(cs); in update_prstate()
2035 if (cpumask_empty(cs->cpus_allowed)) in update_prstate()
2038 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
2042 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
2045 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2054 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2059 err = update_parent_subparts_cpumask(cs, partcmd_disable, in update_prstate()
2065 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2071 update_sibling_cpumasks(parent, cs, &tmpmask); in update_prstate()
2077 cs->partition_root_state = new_prs; in update_prstate()
2188 static void reset_migrate_dl_data(struct cpuset *cs) in reset_migrate_dl_data() argument
2190 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
2191 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
2198 struct cpuset *cs, *oldcs; in cpuset_can_attach() local
2205 cs = css_cs(css); in cpuset_can_attach()
2212 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2224 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
2225 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
2229 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
2232 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
2233 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
2236 reset_migrate_dl_data(cs); in cpuset_can_attach()
2241 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
2243 reset_migrate_dl_data(cs); in cpuset_can_attach()
2253 cs->attach_in_progress++; in cpuset_can_attach()
2263 struct cpuset *cs; in cpuset_cancel_attach() local
2266 cs = css_cs(css); in cpuset_cancel_attach()
2269 cs->attach_in_progress--; in cpuset_cancel_attach()
2270 if (!cs->attach_in_progress) in cpuset_cancel_attach()
2273 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
2274 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
2276 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
2277 reset_migrate_dl_data(cs); in cpuset_cancel_attach()
2297 struct cpuset *cs; in cpuset_attach() local
2301 cs = css_cs(css); in cpuset_attach()
2307 if (cs == &top_cpuset) in cpuset_attach()
2310 guarantee_online_cpus(cs, cpus_attach); in cpuset_attach()
2312 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2322 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
2329 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2344 if (is_memory_migrate(cs)) in cpuset_attach()
2352 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2354 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
2355 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
2356 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
2357 reset_migrate_dl_data(cs); in cpuset_attach()
2360 cs->attach_in_progress--; in cpuset_attach()
2361 if (!cs->attach_in_progress) in cpuset_attach()
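
cpuset_can_attach(), cpuset_cancel_attach(), and cpuset_attach() above form a two-phase commit around deadline-task bandwidth: can_attach counts the migrating DL tasks and reserves their summed bandwidth with dl_bw_alloc(), cancel_attach returns the reservation with dl_bw_free() if the move is aborted, and attach transfers the nr_deadline_tasks accounting from the old cpuset to the new one. A hypothetical user-space sketch of the same prepare/cancel/commit shape (the admission limit and all names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical two-phase commit: reserve in can_attach(), return the
 * reservation in cancel_attach(), move the accounting in attach().
 */
struct demo_grp {
	int nr_dl_tasks;
	long reserved_bw;
};

static long pending_bw;		/* reservation taken by demo_can_attach() */
static int  pending_tasks;

static bool demo_can_attach(struct demo_grp *dst, int nr_tasks, long bw)
{
	if (dst->reserved_bw + bw > 100)	/* invented admission limit */
		return false;			/* like dl_bw_alloc() failing */
	dst->reserved_bw += bw;			/* reserve up front */
	pending_bw = bw;
	pending_tasks = nr_tasks;
	return true;
}

static void demo_cancel_attach(struct demo_grp *dst)
{
	dst->reserved_bw -= pending_bw;		/* like dl_bw_free() */
	pending_bw = 0;
	pending_tasks = 0;
}

static void demo_attach(struct demo_grp *src, struct demo_grp *dst)
{
	dst->nr_dl_tasks += pending_tasks;	/* commit the accounting */
	src->nr_dl_tasks -= pending_tasks;
	pending_bw = 0;
	pending_tasks = 0;
}

int main(void)
{
	struct demo_grp oldg = { .nr_dl_tasks = 3 }, newg = { 0 };

	if (demo_can_attach(&newg, 2, 40))	/* reserve, then ...         */
		demo_attach(&oldg, &newg);	/* ... commit the migration  */

	if (demo_can_attach(&newg, 1, 30))	/* reserve again, then ...   */
		demo_cancel_attach(&newg);	/* ... abort; bw handed back */

	printf("old=%d new=%d reserved=%ld\n",
	       oldg.nr_dl_tasks, newg.nr_dl_tasks, newg.reserved_bw);
	return 0;
}
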
2391 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2397 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2404 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2407 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2410 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2413 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2416 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2422 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2425 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2440 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2446 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2451 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2469 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2476 * CPU or memory hotunplug may leave @cs w/o any execution in cpuset_write_resmask()
2481 * As writes to "cpus" or "mems" may restore @cs's execution in cpuset_write_resmask()
2490 * protection is okay as we check whether @cs is online after in cpuset_write_resmask()
2494 css_get(&cs->css); in cpuset_write_resmask()
2500 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2503 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2511 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2514 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2526 css_put(&cs->css); in cpuset_write_resmask()
2541 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2549 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested)); in cpuset_common_seq_show()
2552 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2555 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2558 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2561 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2573 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2577 return is_cpu_exclusive(cs); in cpuset_read_u64()
2579 return is_mem_exclusive(cs); in cpuset_read_u64()
2581 return is_mem_hardwall(cs); in cpuset_read_u64()
2583 return is_sched_load_balance(cs); in cpuset_read_u64()
2585 return is_memory_migrate(cs); in cpuset_read_u64()
2589 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2591 return is_spread_page(cs); in cpuset_read_u64()
2593 return is_spread_slab(cs); in cpuset_read_u64()
2604 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2608 return cs->relax_domain_level; in cpuset_read_s64()
2619 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2621 switch (cs->partition_root_state) { in sched_partition_show()
2638 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2654 css_get(&cs->css); in sched_partition_write()
2657 if (!is_cpuset_online(cs)) in sched_partition_write()
2660 retval = update_prstate(cs, val); in sched_partition_write()
2664 css_put(&cs->css); in sched_partition_write()
2836 struct cpuset *cs; in cpuset_css_alloc() local
2841 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
2842 if (!cs) in cpuset_css_alloc()
2845 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
2846 kfree(cs); in cpuset_css_alloc()
2850 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
2851 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
2852 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
2853 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
2854 cs->relax_domain_level = -1; in cpuset_css_alloc()
2856 return &cs->css; in cpuset_css_alloc()
2861 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
2862 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
2872 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
2874 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
2876 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
2882 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
2883 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
2884 cs->use_parent_ecpus = true; in cpuset_css_online()
2915 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
2916 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
2917 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
2918 cpumask_copy(cs->cpus_requested, parent->cpus_requested); in cpuset_css_online()
2919 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2940 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2945 if (is_partition_root(cs)) in cpuset_css_offline()
2946 update_prstate(cs, 0); in cpuset_css_offline()
2949 is_sched_load_balance(cs)) in cpuset_css_offline()
2950 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2952 if (cs->use_parent_ecpus) { in cpuset_css_offline()
2953 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
2955 cs->use_parent_ecpus = false; in cpuset_css_offline()
2960 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2968 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2970 free_cpuset(cs); in cpuset_css_free()
3057 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
3065 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
3070 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
3072 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
3078 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
3085 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
3086 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
3087 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
3088 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3095 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3096 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
3097 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3098 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
3100 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3101 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3111 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
3117 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3122 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3124 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3127 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3128 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3132 update_tasks_cpumask(cs); in hotplug_update_tasks()
3134 update_tasks_nodemask(cs); in hotplug_update_tasks()
3146 * @cs: cpuset in interest
3149 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3150 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3153 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3161 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3169 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3174 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3175 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3176 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3178 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3183 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3185 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3193 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || in cpuset_hotplug_update_tasks()
3195 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3197 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3198 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3200 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3211 update_parent_subparts_cpumask(cs, partcmd_disable, in cpuset_hotplug_update_tasks()
3214 cs->partition_root_state = PRS_ERROR; in cpuset_hotplug_update_tasks()
3226 ((cs->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3228 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) in cpuset_hotplug_update_tasks()
3232 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3233 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3236 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3239 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3333 struct cpuset *cs; in cpuset_hotplug_workfn() local
3337 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3338 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3342 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3345 css_put(&cs->css); in cpuset_hotplug_workfn()
3458 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
3520 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3522 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3523 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3524 return cs; in nearest_hardwall_ancestor()
3569 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3593 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3594 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()