Lines matching refs:cs — references to the struct cpuset *cs argument/local in kernel/cgroup/cpuset.c (source line number, matching line, enclosing function):
243 static inline struct cpuset *parent_cs(struct cpuset *cs) in parent_cs() argument
245 return css_cs(cs->css.parent); in parent_cs()
261 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
263 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
266 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
268 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
271 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
273 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
276 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
278 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
281 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
283 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
286 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
288 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
291 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
293 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
296 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
298 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
301 static inline int is_partition_valid(const struct cpuset *cs) in is_partition_valid() argument
303 return cs->partition_root_state > 0; in is_partition_valid()
306 static inline int is_partition_invalid(const struct cpuset *cs) in is_partition_invalid() argument
308 return cs->partition_root_state < 0; in is_partition_invalid()
314 static inline void make_partition_invalid(struct cpuset *cs) in make_partition_invalid() argument
316 if (is_partition_valid(cs)) in make_partition_invalid()
317 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
323 static inline void notify_partition_change(struct cpuset *cs, int old_prs) in notify_partition_change() argument
325 if (old_prs == cs->partition_root_state) in notify_partition_change()
327 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
330 if (is_partition_valid(cs)) in notify_partition_change()
331 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
464 static inline bool partition_is_populated(struct cpuset *cs, in partition_is_populated() argument
470 if (cs->css.cgroup->nr_populated_csets) in partition_is_populated()
472 if (!excluded_child && !cs->nr_subparts_cpus) in partition_is_populated()
473 return cgroup_is_populated(cs->css.cgroup); in partition_is_populated()
476 cpuset_for_each_child(child, css, cs) { in partition_is_populated()
505 struct cpuset *cs; in guarantee_online_cpus() local
511 cs = task_cs(tsk); in guarantee_online_cpus()
513 while (!cpumask_intersects(cs->effective_cpus, pmask)) { in guarantee_online_cpus()
514 cs = parent_cs(cs); in guarantee_online_cpus()
515 if (unlikely(!cs)) { in guarantee_online_cpus()
526 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_online_cpus()
543 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
545 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
546 cs = parent_cs(cs); in guarantee_online_mems()
547 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
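guarantee_online_cpus() and guarantee_online_mems() above share a "walk up until something usable is found" shape: climb parent_cs() while the cpuset's effective mask has no overlap with the online set, then return the intersection at the level where it does. A minimal userspace sketch of that walk; the toy struct and the uint64_t masks stand in for the kernel's cpumask/nodemask types:

#include <stdint.h>
#include <stdio.h>

struct toy_cpuset {
	struct toy_cpuset *parent;
	uint64_t effective_cpus;	/* bit i set => CPU i allowed */
};

static uint64_t guarantee_online(const struct toy_cpuset *cs, uint64_t online)
{
	/* Keep climbing while the current level has nothing online. */
	while (cs && !(cs->effective_cpus & online))
		cs = cs->parent;
	/* Fall back to the whole online mask if even the root is empty. */
	return cs ? (cs->effective_cpus & online) : online;
}

int main(void)
{
	struct toy_cpuset root  = { .parent = NULL,  .effective_cpus = 0x0f };
	struct toy_cpuset child = { .parent = &root, .effective_cpus = 0x30 };
	uint64_t online = 0x0f;		/* CPUs 0-3 online, 4-5 offline */

	/* child's CPUs are all offline, so its parent's mask wins: 0x0f */
	printf("usable mask: 0x%llx\n",
	       (unsigned long long)guarantee_online(&child, online));
	return 0;
}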
556 static void cpuset_update_task_spread_flags(struct cpuset *cs, in cpuset_update_task_spread_flags() argument
562 if (is_spread_page(cs)) in cpuset_update_task_spread_flags()
567 if (is_spread_slab(cs)) in cpuset_update_task_spread_flags()
597 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
601 if (cs) { in alloc_cpumasks()
602 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
603 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
604 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
634 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
636 if (cs) { in free_cpumasks()
637 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
638 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
639 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
652 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
656 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
665 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
666 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
674 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
676 free_cpumasks(cs, NULL); in free_cpuset()
677 kfree(cs); in free_cpuset()
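alloc_trial_cpuset() and free_cpuset() above pair a shallow struct duplication (kmemdup()) with a deep copy of the embedded cpumask pointers, and the matching free tears down both. A sketch of that duplicate-then-deep-copy idea with userspace stand-ins (malloc/memcpy replace kmemdup()/alloc_cpumask_var()); the toy types are illustrative:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct toy_mask { unsigned long bits; };

struct toy_cpuset {
	int flags;
	struct toy_mask *cpus_allowed;
	struct toy_mask *effective_cpus;
};

static struct toy_cpuset *alloc_trial(const struct toy_cpuset *cs)
{
	/* Shallow copy first (kmemdup() in the kernel). */
	struct toy_cpuset *trial = malloc(sizeof(*trial));
	if (!trial)
		return NULL;
	memcpy(trial, cs, sizeof(*trial));

	/* Then give the copy its own masks and copy their contents. */
	trial->cpus_allowed   = malloc(sizeof(*trial->cpus_allowed));
	trial->effective_cpus = malloc(sizeof(*trial->effective_cpus));
	if (!trial->cpus_allowed || !trial->effective_cpus) {
		free(trial->cpus_allowed);
		free(trial->effective_cpus);
		free(trial);
		return NULL;
	}
	*trial->cpus_allowed   = *cs->cpus_allowed;
	*trial->effective_cpus = *cs->effective_cpus;
	return trial;
}

static void free_trial(struct toy_cpuset *cs)
{
	if (!cs)
		return;
	free(cs->cpus_allowed);
	free(cs->effective_cpus);
	free(cs);
}

int main(void)
{
	struct toy_mask m1 = { 0xf }, m2 = { 0x3 };
	struct toy_cpuset cs = { .flags = 1, .cpus_allowed = &m1,
				 .effective_cpus = &m2 };
	struct toy_cpuset *trial = alloc_trial(&cs);

	if (trial) {
		trial->cpus_allowed->bits = 0x1;	/* mutate only the copy */
		printf("original: 0x%lx, trial: 0x%lx\n",
		       cs.cpus_allowed->bits, trial->cpus_allowed->bits);
		free_trial(trial);
	}
	return 0;
}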
1069 static void update_tasks_root_domain(struct cpuset *cs) in update_tasks_root_domain() argument
1074 css_task_iter_start(&cs->css, 0, &it); in update_tasks_root_domain()
1084 struct cpuset *cs = NULL; in rebuild_root_domains() local
1099 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_root_domains()
1101 if (cpumask_empty(cs->effective_cpus)) { in rebuild_root_domains()
1106 css_get(&cs->css); in rebuild_root_domains()
1110 update_tasks_root_domain(cs); in rebuild_root_domains()
1113 css_put(&cs->css); in rebuild_root_domains()
1144 struct cpuset *cs; in rebuild_sched_domains_locked() local
1170 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1171 if (!is_partition_valid(cs)) { in rebuild_sched_domains_locked()
1175 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1214 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) in update_tasks_cpumask() argument
1218 bool top_cs = cs == &top_cpuset; in update_tasks_cpumask()
1220 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1229 cpumask_and(new_cpus, cs->effective_cpus, in update_tasks_cpumask()
1248 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1253 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1256 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1270 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1310 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, in update_parent_subparts_cpumask() argument
1314 struct cpuset *parent = parent_cs(cs); in update_parent_subparts_cpumask()
1332 (!newmask && cpumask_empty(cs->cpus_allowed))) in update_parent_subparts_cpumask()
1340 old_prs = new_prs = cs->partition_root_state; in update_parent_subparts_cpumask()
1346 if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed)) in update_parent_subparts_cpumask()
1353 if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) && in update_parent_subparts_cpumask()
1354 partition_is_populated(parent, cs)) in update_parent_subparts_cpumask()
1357 cpumask_copy(tmp->addmask, cs->cpus_allowed); in update_parent_subparts_cpumask()
1365 cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1375 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1391 cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask); in update_parent_subparts_cpumask()
1405 partition_is_populated(parent, cs)) { in update_parent_subparts_cpumask()
1408 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1430 cpumask_and(tmp->addmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1435 if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) || in update_parent_subparts_cpumask()
1438 partition_is_populated(parent, cs))) { in update_parent_subparts_cpumask()
1443 if (part_error && is_partition_valid(cs) && in update_parent_subparts_cpumask()
1445 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1449 WRITE_ONCE(cs->prs_err, part_error); in update_parent_subparts_cpumask()
1456 switch (cs->partition_root_state) { in update_parent_subparts_cpumask()
1478 if (is_prs_invalid(old_prs) && !is_cpu_exclusive(cs) && in update_parent_subparts_cpumask()
1479 (update_flag(CS_CPU_EXCLUSIVE, cs, 1) < 0)) in update_parent_subparts_cpumask()
1481 if (is_prs_invalid(new_prs) && is_cpu_exclusive(cs)) in update_parent_subparts_cpumask()
1482 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_parent_subparts_cpumask()
1511 cs->partition_root_state = new_prs; in update_parent_subparts_cpumask()
1524 update_flag(CS_SCHED_LOAD_BALANCE, cs, 1); in update_parent_subparts_cpumask()
1526 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in update_parent_subparts_cpumask()
1528 notify_partition_change(cs, old_prs); in update_parent_subparts_cpumask()
1545 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, in update_cpumasks_hier() argument
1554 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1600 if ((cp != cs) && old_prs) { in update_cpumasks_hier()
1697 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1715 if (sibling == cs) in update_sibling_cpumasks()
1736 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1744 if (cs == &top_cpuset) in update_cpumask()
1766 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1779 retval = validate_change(cs, trialcs); in update_cpumask()
1794 parent = parent_cs(cs); in update_cpumask()
1808 if (cs->partition_root_state) { in update_cpumask()
1810 update_parent_subparts_cpumask(cs, partcmd_invalidate, in update_cpumask()
1813 update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1818 parent_cs(cs)); in update_cpumask()
1820 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1827 if (cs->nr_subparts_cpus) { in update_cpumask()
1828 if (!is_partition_valid(cs) || in update_cpumask()
1829 (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) && in update_cpumask()
1830 partition_is_populated(cs, NULL))) { in update_cpumask()
1831 cs->nr_subparts_cpus = 0; in update_cpumask()
1832 cpumask_clear(cs->subparts_cpus); in update_cpumask()
1834 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1835 cs->cpus_allowed); in update_cpumask()
1836 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1842 update_cpumasks_hier(cs, &tmp, false); in update_cpumask()
1844 if (cs->partition_root_state) { in update_cpumask()
1845 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1852 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1948 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1954 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1956 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1968 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1979 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1981 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1983 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1993 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
2011 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
2017 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
2067 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
2076 if (cs == &top_cpuset) { in update_nodemask()
2101 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
2105 retval = validate_change(cs, trialcs); in update_nodemask()
2112 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2116 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2132 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
2139 if (val != cs->relax_domain_level) { in update_relax_domain_level()
2140 cs->relax_domain_level = val; in update_relax_domain_level()
2141 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
2142 is_sched_load_balance(cs)) in update_relax_domain_level()
2157 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
2162 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
2164 cpuset_update_task_spread_flags(cs, task); in update_tasks_flags()
2177 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
2185 trialcs = alloc_trial_cpuset(cs); in update_flag()
2194 err = validate_change(cs, trialcs); in update_flag()
2198 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
2201 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
2202 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
2205 cs->flags = trialcs->flags; in update_flag()
2212 update_tasks_flags(cs); in update_flag()
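The update_flag() lines above outline the trial/validate/commit flow used for every flag write: copy the cpuset, apply the change to the copy, run validate_change() against it, and only on success commit the new flags word and propagate to the member tasks. A compact sketch of that flow; validate() and the toy types are illustrative stand-ins, not the kernel's:

#include <stdio.h>

struct toy_cpuset { unsigned long flags; };

static int validate(const struct toy_cpuset *cur, const struct toy_cpuset *trial)
{
	(void)cur;
	(void)trial;
	return 0;			/* 0 == OK, like validate_change() */
}

static int update_flag(struct toy_cpuset *cs, int bit, int turning_on)
{
	struct toy_cpuset trial = *cs;	/* trial copy, cf. alloc_trial_cpuset() */
	int err;

	if (turning_on)
		trial.flags |= 1UL << bit;
	else
		trial.flags &= ~(1UL << bit);

	err = validate(cs, &trial);
	if (err)
		return err;		/* reject: cs is left untouched */

	cs->flags = trial.flags;	/* commit */
	return 0;
}

int main(void)
{
	struct toy_cpuset cs = { .flags = 0 };

	update_flag(&cs, 3, 1);
	printf("flags=0x%lx\n", cs.flags);	/* 0x8 */
	return 0;
}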
2226 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2228 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
2230 struct cpuset *parent = parent_cs(cs); in update_prstate()
2241 cs->partition_root_state = -new_prs; in update_prstate()
2254 if (cpumask_empty(cs->cpus_allowed)) { in update_prstate()
2259 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
2265 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
2268 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2277 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in update_prstate()
2284 update_flag(CS_SCHED_LOAD_BALANCE, cs, (new_prs != PRS_ISOLATED)); in update_prstate()
2292 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, in update_prstate()
2298 if (unlikely(cs->nr_subparts_cpus)) { in update_prstate()
2300 cs->nr_subparts_cpus = 0; in update_prstate()
2301 cpumask_clear(cs->subparts_cpus); in update_prstate()
2302 compute_effective_cpumask(cs->effective_cpus, cs, parent); in update_prstate()
2307 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2309 if (!is_sched_load_balance(cs)) { in update_prstate()
2311 update_flag(CS_SCHED_LOAD_BALANCE, cs, 1); in update_prstate()
2319 update_sibling_cpumasks(parent, cs, &tmpmask); in update_prstate()
2330 cs->partition_root_state = new_prs; in update_prstate()
2331 WRITE_ONCE(cs->prs_err, err); in update_prstate()
2337 if (!list_empty(&cs->css.children)) in update_prstate()
2338 update_cpumasks_hier(cs, &tmpmask, !new_prs); in update_prstate()
2340 notify_partition_change(cs, old_prs); in update_prstate()
2452 struct cpuset *cs; in cpuset_can_attach() local
2458 cs = css_cs(css); in cpuset_can_attach()
2465 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2471 if (cpumask_empty(cs->effective_cpus)) in cpuset_can_attach()
2475 ret = task_can_attach(task, cs->effective_cpus); in cpuset_can_attach()
2487 cs->attach_in_progress++; in cpuset_can_attach()
2519 struct cpuset *cs; in cpuset_attach() local
2524 cs = css_cs(css); in cpuset_attach()
2528 cpus_updated = !cpumask_equal(cs->effective_cpus, in cpuset_attach()
2530 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_attach()
2540 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2544 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2547 if (cs != &top_cpuset) in cpuset_attach()
2558 cpuset_update_task_spread_flags(cs, task); in cpuset_attach()
2567 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2568 if (!is_memory_migrate(cs) && !mems_updated) in cpuset_attach()
2585 if (is_memory_migrate(cs)) in cpuset_attach()
2594 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2596 cs->attach_in_progress--; in cpuset_attach()
2597 if (!cs->attach_in_progress) in cpuset_attach()
2627 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2633 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2640 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2643 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2646 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2649 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2652 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2658 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2661 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2676 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2682 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2687 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2705 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2730 css_get(&cs->css); in cpuset_write_resmask()
2736 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2739 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2747 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2750 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2762 css_put(&cs->css); in cpuset_write_resmask()
2777 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2785 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2788 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2791 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2794 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2797 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
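cpuset_common_seq_show() above is what backs the cpuset interface files, so the simplest way to inspect these masks from userspace is to read the files it serves. A minimal reader, assuming cgroup v2 is mounted at /sys/fs/cgroup and the target cgroup path is passed as argv[1]; the file names used are the standard cgroup v2 ones (cpuset.cpus.effective, cpuset.mems.effective):

#include <stdio.h>

static void show(const char *cgroup, const char *file)
{
	char path[512], buf[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/cgroup/%s/%s", cgroup, file);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-24s %s", file, buf);	/* buf keeps its newline */
	fclose(f);
}

int main(int argc, char **argv)
{
	const char *cgroup = argc > 1 ? argv[1] : ".";

	show(cgroup, "cpuset.cpus.effective");
	show(cgroup, "cpuset.mems.effective");
	return 0;
}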
2809 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2813 return is_cpu_exclusive(cs); in cpuset_read_u64()
2815 return is_mem_exclusive(cs); in cpuset_read_u64()
2817 return is_mem_hardwall(cs); in cpuset_read_u64()
2819 return is_sched_load_balance(cs); in cpuset_read_u64()
2821 return is_memory_migrate(cs); in cpuset_read_u64()
2825 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2827 return is_spread_page(cs); in cpuset_read_u64()
2829 return is_spread_slab(cs); in cpuset_read_u64()
2840 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2844 return cs->relax_domain_level; in cpuset_read_s64()
2855 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2858 switch (cs->partition_root_state) { in sched_partition_show()
2874 err = perr_strings[READ_ONCE(cs->prs_err)]; in sched_partition_show()
2887 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2905 css_get(&cs->css); in sched_partition_write()
2908 if (!is_cpuset_online(cs)) in sched_partition_write()
2911 retval = update_prstate(cs, val); in sched_partition_write()
2915 css_put(&cs->css); in sched_partition_write()
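sched_partition_write() above services writes to the cgroup v2 cpuset.cpus.partition file and funnels them into update_prstate(). A minimal userspace sketch of driving it, assuming cgroup v2 at /sys/fs/cgroup, a pre-created child cgroup passed as argv[1], and one of the documented keywords ("member", "root", "isolated") as argv[2]:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[512];
	FILE *f;

	if (argc < 3) {
		fprintf(stderr, "usage: %s <cgroup> member|root|isolated\n",
			argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path),
		 "/sys/fs/cgroup/%s/cpuset.cpus.partition", argv[1]);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	/*
	 * update_prstate() may still leave the partition invalid; reading
	 * the file back then reports e.g. "root invalid (<reason>)".
	 */
	fprintf(f, "%s\n", argv[2]);
	if (fclose(f) != 0) {		/* write errors surface on close */
		perror("write");
		return 1;
	}
	return 0;
}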
3092 struct cpuset *cs; in cpuset_css_alloc() local
3097 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
3098 if (!cs) in cpuset_css_alloc()
3101 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
3102 kfree(cs); in cpuset_css_alloc()
3106 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3107 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
3108 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
3109 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3110 cs->relax_domain_level = -1; in cpuset_css_alloc()
3114 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3116 return &cs->css; in cpuset_css_alloc()
3121 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
3122 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
3132 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
3134 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3136 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3142 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3143 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3144 cs->use_parent_ecpus = true; in cpuset_css_online()
3175 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3176 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3177 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3178 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3199 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
3204 if (is_partition_valid(cs)) in cpuset_css_offline()
3205 update_prstate(cs, 0); in cpuset_css_offline()
3208 is_sched_load_balance(cs)) in cpuset_css_offline()
3209 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
3211 if (cs->use_parent_ecpus) { in cpuset_css_offline()
3212 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
3214 cs->use_parent_ecpus = false; in cpuset_css_offline()
3219 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
3227 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
3229 free_cpuset(cs); in cpuset_css_free()
3314 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
3322 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
3327 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
3329 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
3335 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
3342 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
3343 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
3344 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
3345 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3352 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3353 update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks_legacy()
3354 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3355 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
3357 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3358 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3368 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
3374 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3379 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) in hotplug_update_tasks()
3380 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3382 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3385 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3386 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3390 update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks()
3392 update_tasks_nodemask(cs); in hotplug_update_tasks()
3411 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3419 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3427 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3432 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3433 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3434 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3436 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3441 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3443 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3452 if (cs->nr_subparts_cpus && is_partition_valid(cs) && in cpuset_hotplug_update_tasks()
3453 cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) { in cpuset_hotplug_update_tasks()
3455 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3456 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3458 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3468 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus || in cpuset_hotplug_update_tasks()
3469 (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) { in cpuset_hotplug_update_tasks()
3472 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp); in cpuset_hotplug_update_tasks()
3473 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3475 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3476 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3478 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3481 old_prs = cs->partition_root_state; in cpuset_hotplug_update_tasks()
3483 if (is_partition_valid(cs)) { in cpuset_hotplug_update_tasks()
3485 make_partition_invalid(cs); in cpuset_hotplug_update_tasks()
3488 WRITE_ONCE(cs->prs_err, PERR_INVPARENT); in cpuset_hotplug_update_tasks()
3490 WRITE_ONCE(cs->prs_err, PERR_NOTPART); in cpuset_hotplug_update_tasks()
3492 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG); in cpuset_hotplug_update_tasks()
3493 notify_partition_change(cs, old_prs); in cpuset_hotplug_update_tasks()
3502 else if (is_partition_valid(parent) && is_partition_invalid(cs)) { in cpuset_hotplug_update_tasks()
3503 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp); in cpuset_hotplug_update_tasks()
3504 if (is_partition_valid(cs)) in cpuset_hotplug_update_tasks()
3509 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3510 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3516 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3519 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3613 struct cpuset *cs; in cpuset_hotplug_workfn() local
3617 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3618 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3622 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3625 css_put(&cs->css); in cpuset_hotplug_workfn()
3703 struct cpuset *cs; in cpuset_cpus_allowed() local
3708 cs = task_cs(tsk); in cpuset_cpus_allowed()
3709 if (cs != &top_cpuset) in cpuset_cpus_allowed()
3716 if ((cs == &top_cpuset) || cpumask_empty(pmask)) { in cpuset_cpus_allowed()
3826 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3828 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3829 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3830 return cs; in nearest_hardwall_ancestor()
3875 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3899 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3900 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()