Lines Matching defs:cs

209 	struct cpuset *cs;
265 static inline struct cpuset *parent_cs(struct cpuset *cs)
267 return css_cs(cs->css.parent);
272 struct cpuset *cs = task_cs(p);
274 cs->nr_deadline_tasks++;
279 struct cpuset *cs = task_cs(p);
281 cs->nr_deadline_tasks--;
297 static inline bool is_cpuset_online(struct cpuset *cs)
299 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
302 static inline int is_cpu_exclusive(const struct cpuset *cs)
304 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
307 static inline int is_mem_exclusive(const struct cpuset *cs)
309 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
312 static inline int is_mem_hardwall(const struct cpuset *cs)
314 return test_bit(CS_MEM_HARDWALL, &cs->flags);
317 static inline int is_sched_load_balance(const struct cpuset *cs)
319 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
322 static inline int is_memory_migrate(const struct cpuset *cs)
324 return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
327 static inline int is_spread_page(const struct cpuset *cs)
329 return test_bit(CS_SPREAD_PAGE, &cs->flags);
332 static inline int is_spread_slab(const struct cpuset *cs)
334 return test_bit(CS_SPREAD_SLAB, &cs->flags);
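
The eight helpers above share one pattern: each behavioural flag of a cpuset occupies a single bit in cs->flags and is queried with test_bit(). A minimal userspace sketch of that pattern, using made-up DEMO_* flag names and a plain shift-and-mask in place of the kernel's test_bit():

#include <stdio.h>

/* Illustrative flag bits; the kernel's CS_* enum plays the same role. */
enum demo_flagbits {
	DEMO_CPU_EXCLUSIVE,
	DEMO_MEM_EXCLUSIVE,
	DEMO_SCHED_LOAD_BALANCE,
};

struct demo_cpuset {
	unsigned long flags;		/* one bit per demo_flagbits entry */
};

/* Plain C stand-in for the kernel's test_bit(). */
static inline int demo_test_bit(int bit, const unsigned long *word)
{
	return (int)((*word >> bit) & 1UL);
}

static inline int is_cpu_exclusive(const struct demo_cpuset *cs)
{
	return demo_test_bit(DEMO_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_sched_load_balance(const struct demo_cpuset *cs)
{
	return demo_test_bit(DEMO_SCHED_LOAD_BALANCE, &cs->flags);
}

int main(void)
{
	struct demo_cpuset cs = { .flags = 1UL << DEMO_SCHED_LOAD_BALANCE };

	printf("cpu_exclusive=%d load_balance=%d\n",
	       is_cpu_exclusive(&cs), is_sched_load_balance(&cs));
	return 0;
}

Keeping every flag in one unsigned long is also what lets update_flag() later in this listing copy and compare the whole word at once (cs->flags = trialcs->flags at line 3014).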
337 static inline int is_partition_valid(const struct cpuset *cs)
339 return cs->partition_root_state > 0;
342 static inline int is_partition_invalid(const struct cpuset *cs)
344 return cs->partition_root_state < 0;
350 static inline void make_partition_invalid(struct cpuset *cs)
352 if (cs->partition_root_state > 0)
353 cs->partition_root_state = -cs->partition_root_state;
359 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
361 if (old_prs == cs->partition_root_state)
363 cgroup_file_notify(&cs->partition_file);
366 if (is_partition_valid(cs))
367 WRITE_ONCE(cs->prs_err, PERR_NONE);
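
make_partition_invalid() and the is_partition_valid()/is_partition_invalid() tests above rely on a sign convention for partition_root_state: a positive value names a valid partition type, its negation marks the same type as invalid, and zero means the cpuset is not a partition root; notify_partition_change() then only emits a cgroup file notification when that value actually changed. A small sketch of the convention, with illustrative DEMO_PRS_* values rather than the kernel's PRS_* constants:

#include <stdio.h>

/* Illustrative states: >0 is a valid partition type, <0 the same type made
 * invalid, 0 means "not a partition root". */
enum { DEMO_PRS_NONE = 0, DEMO_PRS_ROOT = 1, DEMO_PRS_ISOLATED = 2 };

struct demo_cpuset { int partition_root_state; };

static int is_partition_valid(const struct demo_cpuset *cs)
{
	return cs->partition_root_state > 0;
}

static int is_partition_invalid(const struct demo_cpuset *cs)
{
	return cs->partition_root_state < 0;
}

static void make_partition_invalid(struct demo_cpuset *cs)
{
	/* Negation preserves the partition type while flagging it invalid. */
	if (cs->partition_root_state > 0)
		cs->partition_root_state = -cs->partition_root_state;
}

int main(void)
{
	struct demo_cpuset cs = { .partition_root_state = DEMO_PRS_ISOLATED };

	make_partition_invalid(&cs);
	printf("state=%d valid=%d invalid=%d\n", cs.partition_root_state,
	       is_partition_valid(&cs), is_partition_invalid(&cs));
	return 0;
}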
490 * @cs: partition root to be checked
494 * It is assumed that @cs is a valid partition root. @excluded_child should
497 static inline bool partition_is_populated(struct cpuset *cs,
503 if (cs->css.cgroup->nr_populated_csets)
505 if (!excluded_child && !cs->nr_subparts)
506 return cgroup_is_populated(cs->css.cgroup);
509 cpuset_for_each_child(child, css, cs) {
538 struct cpuset *cs;
544 cs = task_cs(tsk);
546 while (!cpumask_intersects(cs->effective_cpus, pmask))
547 cs = parent_cs(cs);
549 cpumask_and(pmask, pmask, cs->effective_cpus);
564 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
566 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
567 cs = parent_cs(cs);
568 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
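
guarantee_online_cpus() and guarantee_online_mems() above use the same fallback walk: while the current cpuset's effective mask does not intersect the request, climb to the parent via parent_cs(), then AND the request with the ancestor's effective mask. A toy sketch of that loop, assuming a 64-bit word in place of a cpumask and an explicit parent pointer instead of the css hierarchy:

#include <stdio.h>

/* Toy cpuset: a 64-bit word stands in for a cpumask, parent is explicit. */
struct demo_cpuset {
	struct demo_cpuset *parent;
	unsigned long effective_cpus;
};

/* Climb until an ancestor's effective CPUs intersect *pmask, then narrow. */
static void guarantee_online_cpus_demo(struct demo_cpuset *cs,
				       unsigned long *pmask)
{
	while (cs->parent && !(cs->effective_cpus & *pmask))
		cs = cs->parent;
	*pmask &= cs->effective_cpus;
}

int main(void)
{
	struct demo_cpuset root  = { .parent = NULL,  .effective_cpus = 0xffUL };
	struct demo_cpuset child = { .parent = &root, .effective_cpus = 0x0cUL };
	unsigned long mask = 0x30UL;	/* CPUs 4-5: none of them in the child */

	guarantee_online_cpus_demo(&child, &mask);
	printf("mask=0x%lx\n", mask);	/* falls back to the root: 0x30 */
	return 0;
}

The kernel helpers can omit the NULL-parent guard because top_cpuset always intersects the online CPUs/memory nodes; the guard here exists only so the standalone demo terminates.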
577 static void cpuset_update_task_spread_flags(struct cpuset *cs,
583 if (is_spread_page(cs))
588 if (is_spread_slab(cs))
612 * @cs: the cpuset that has cpumasks to be allocated.
618 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
622 if (cs) {
623 pmask1 = &cs->cpus_allowed;
624 pmask2 = &cs->effective_cpus;
625 pmask3 = &cs->effective_xcpus;
626 pmask4 = &cs->exclusive_cpus;
660 * @cs: the cpuset that has cpumasks to be freed.
663 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
665 if (cs) {
666 free_cpumask_var(cs->cpus_allowed);
667 free_cpumask_var(cs->effective_cpus);
668 free_cpumask_var(cs->effective_xcpus);
669 free_cpumask_var(cs->exclusive_cpus);
680 * @cs: the cpuset that the trial cpuset duplicates
682 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
686 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
695 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
696 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
697 cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
698 cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
704 * @cs: the cpuset to be freed
706 static inline void free_cpuset(struct cpuset *cs)
708 free_cpumasks(cs, NULL);
709 kfree(cs);
712 static inline struct cpumask *fetch_xcpus(struct cpuset *cs)
714 return !cpumask_empty(cs->exclusive_cpus) ? cs->exclusive_cpus :
715 cpumask_empty(cs->effective_xcpus) ? cs->cpus_allowed
716 : cs->effective_xcpus;
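
fetch_xcpus() is a chained conditional whose precedence is easy to misread: return exclusive_cpus when it is non-empty; otherwise fall back to cpus_allowed when effective_xcpus is empty, and to effective_xcpus when it is not. The same selection spelled out as if/else, with a one-word mask standing in for struct cpumask:

#include <stdio.h>

/* One-word masks stand in for struct cpumask. */
struct demo_cpuset {
	unsigned long cpus_allowed;
	unsigned long effective_xcpus;
	unsigned long exclusive_cpus;
};

/* Same precedence as the chained ?: in fetch_xcpus(), made explicit. */
static const unsigned long *fetch_xcpus_demo(const struct demo_cpuset *cs)
{
	if (cs->exclusive_cpus)		/* user-requested exclusive CPUs win  */
		return &cs->exclusive_cpus;
	if (!cs->effective_xcpus)	/* nothing exclusive is in effect     */
		return &cs->cpus_allowed;
	return &cs->effective_xcpus;	/* otherwise the effective exclusives */
}

int main(void)
{
	struct demo_cpuset cs = {
		.cpus_allowed    = 0xf0UL,
		.effective_xcpus = 0x30UL,
		.exclusive_cpus  = 0x00UL,
	};

	printf("xcpus=0x%lx\n", *fetch_xcpus_demo(&cs));	/* 0x30 */
	return 0;
}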
1124 static void dl_update_tasks_root_domain(struct cpuset *cs)
1129 if (cs->nr_deadline_tasks == 0)
1132 css_task_iter_start(&cs->css, 0, &it);
1142 struct cpuset *cs = NULL;
1157 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1159 if (cpumask_empty(cs->effective_cpus)) {
1164 css_get(&cs->css);
1168 dl_update_tasks_root_domain(cs);
1171 css_put(&cs->css);
1202 struct cpuset *cs;
1228 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1229 if (!is_partition_valid(cs)) {
1233 if (!cpumask_subset(cs->effective_cpus,
1270 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1273 * Iterate through each task of @cs updating its cpus_allowed to the
1279 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1283 bool top_cs = cs == &top_cpuset;
1285 css_task_iter_start(&cs->css, 0, &it);
1297 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1307 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1313 struct cpuset *cs, struct cpuset *parent)
1315 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1329 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1331 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1339 static int update_partition_exclusive(struct cpuset *cs, int new_prs)
1343 if (exclusive && !is_cpu_exclusive(cs)) {
1344 if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1346 } else if (!exclusive && is_cpu_exclusive(cs)) {
1348 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1360 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1362 int new_prs = cs->partition_root_state;
1367 * If cs is not a valid partition root, the load balance state
1373 new_lb = is_sched_load_balance(parent_cs(cs));
1375 if (new_lb != !!is_sched_load_balance(cs)) {
1378 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1380 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1390 static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1394 * A populated partition (cs or parent) can't have empty effective_cpus
1397 partition_is_populated(parent, cs)) ||
1399 partition_is_populated(cs, NULL));
1402 static void reset_partition_data(struct cpuset *cs)
1404 struct cpuset *parent = parent_cs(cs);
1411 cs->nr_subparts = 0;
1412 if (cpumask_empty(cs->exclusive_cpus)) {
1413 cpumask_clear(cs->effective_xcpus);
1414 if (is_cpu_exclusive(cs))
1415 clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1417 if (!cpumask_and(cs->effective_cpus,
1418 parent->effective_cpus, cs->cpus_allowed)) {
1419 cs->use_parent_ecpus = true;
1421 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1530 * @cs: cpuset
1537 static bool compute_effective_exclusive_cpumask(struct cpuset *cs,
1540 struct cpuset *parent = parent_cs(cs);
1543 xcpus = cs->effective_xcpus;
1545 if (!cpumask_empty(cs->exclusive_cpus))
1546 cpumask_and(xcpus, cs->exclusive_cpus, cs->cpus_allowed);
1548 cpumask_copy(xcpus, cs->cpus_allowed);
1553 static inline bool is_remote_partition(struct cpuset *cs)
1555 return !list_empty(&cs->remote_sibling);
1558 static inline bool is_local_partition(struct cpuset *cs)
1560 return is_partition_valid(cs) && !is_remote_partition(cs);
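
is_remote_partition() encodes a boolean in list membership: a remote partition is linked on the global remote_children list, so !list_empty(&cs->remote_sibling) is true exactly while it is enrolled there, and is_local_partition() is then "valid partition and not remote". The kernel's list_head makes this cheap because an unlinked node points at itself. A standalone sketch of that self-linked convention (demo_* names, not the kernel's list API):

#include <stdio.h>
#include <stdbool.h>

/* Minimal doubly linked list node, same convention as the kernel's
 * list_head: an "empty" (unlinked) node points to itself. */
struct demo_list_head { struct demo_list_head *next, *prev; };

static void demo_init_list_head(struct demo_list_head *h)
{
	h->next = h->prev = h;
}

static bool demo_list_empty(const struct demo_list_head *h)
{
	return h->next == h;
}

static void demo_list_add(struct demo_list_head *new, struct demo_list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

struct demo_cpuset { struct demo_list_head remote_sibling; };

static bool is_remote_partition_demo(const struct demo_cpuset *cs)
{
	return !demo_list_empty(&cs->remote_sibling);
}

int main(void)
{
	struct demo_list_head remote_children;
	struct demo_cpuset cs;

	demo_init_list_head(&remote_children);
	demo_init_list_head(&cs.remote_sibling);
	printf("remote=%d\n", is_remote_partition_demo(&cs));	/* 0 */

	demo_list_add(&cs.remote_sibling, &remote_children);
	printf("remote=%d\n", is_remote_partition_demo(&cs));	/* 1 */
	return 0;
}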
1565 * @cs: the cpuset to update
1573 static int remote_partition_enable(struct cpuset *cs, int new_prs,
1592 compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
1600 list_add(&cs->remote_sibling, &remote_children);
1601 if (cs->use_parent_ecpus) {
1602 struct cpuset *parent = parent_cs(cs);
1604 cs->use_parent_ecpus = false;
1620 * @cs: the cpuset to update
1627 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1631 compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
1632 WARN_ON_ONCE(!is_remote_partition(cs));
1636 list_del_init(&cs->remote_sibling);
1637 isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
1639 cs->partition_root_state = -cs->partition_root_state;
1640 if (!cs->prs_err)
1641 cs->prs_err = PERR_INVCPUS;
1642 reset_partition_data(cs);
1655 * @cs: the cpuset to be updated
1662 static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
1666 int prs = cs->partition_root_state;
1669 if (WARN_ON_ONCE(!is_remote_partition(cs)))
1672 WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1677 adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
1678 deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);
1706 remote_partition_disable(cs, tmp);
1711 * @cs: the cpuset to be updated
1716 * This should be called before the given cs has updated its cpus_allowed
1719 static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
1728 if (!cpumask_andnot(delmask, cs->effective_xcpus, newmask) ||
1772 * @cs: The cpuset that requests change in partition root state
1807 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1811 struct cpuset *parent = parent_cs(cs);
1817 struct cpumask *xcpus; /* cs effective_xcpus */
1828 old_prs = new_prs = cs->partition_root_state;
1829 xcpus = !cpumask_empty(cs->exclusive_cpus)
1830 ? cs->effective_xcpus : cs->cpus_allowed;
1858 if (!newmask && cpumask_empty(cs->cpus_allowed))
1861 nocpu = tasks_nocpu_error(parent, cs, xcpus);
1963 if (is_partition_valid(cs))
1966 } else if (is_partition_invalid(cs) &&
1978 if (child == cs)
1980 if (!cpusets_are_exclusive(cs, child)) {
1996 WRITE_ONCE(cs->prs_err, part_error);
2003 switch (cs->partition_root_state) {
2028 * CPU lists in cs haven't been updated yet. So defer it to later.
2031 int err = update_partition_exclusive(cs, new_prs);
2046 cs->partition_root_state = new_prs;
2048 cs->nr_subparts = 0;
2051 * Adding to parent's effective_cpus means deleting CPUs from cs
2069 update_partition_exclusive(cs, new_prs);
2073 update_sibling_cpumasks(parent, cs, tmp);
2082 update_partition_sd_lb(cs, old_prs);
2084 notify_partition_change(cs, old_prs);
2090 * @cs: partition root cpuset
2104 static void compute_partition_effective_cpumask(struct cpuset *cs,
2109 bool populated = partition_is_populated(cs, NULL);
2119 compute_effective_exclusive_cpumask(cs, new_ecpus);
2123 cpuset_for_each_child(child, css, cs) {
2129 cs->effective_xcpus))
2143 cs->nr_subparts--;
2163 * @cs: the cpuset to consider
2174 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2183 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2190 * directly from top cpuset unless it is cs.
2192 if (remote && (cp != cs)) {
2201 if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) {
2263 * for cs already in update_cpumask(). We should also call
2267 if ((cp != cs) && old_prs) {
2361 * @cs: Current cpuset
2364 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2389 if (sibling == cs)
2411 * @cs: the cpuset to consider
2415 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2420 struct cpuset *parent = parent_cs(cs);
2423 int old_prs = cs->partition_root_state;
2426 if (cs == &top_cpuset)
2453 if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs))
2458 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2465 if (is_partition_valid(cs) &&
2468 cs->prs_err = PERR_INVCPUS;
2471 cs->prs_err = PERR_HKEEPING;
2472 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2474 cs->prs_err = PERR_NOCPUS;
2482 if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
2485 retval = validate_change(cs, trialcs);
2517 if (is_partition_valid(cs) ||
2518 (is_partition_invalid(cs) && !invalidate)) {
2521 if (cpumask_empty(xcpus) && is_partition_invalid(cs))
2527 if (is_remote_partition(cs))
2528 remote_cpus_update(cs, xcpus, &tmp);
2530 update_parent_effective_cpumask(cs, partcmd_invalidate,
2533 update_parent_effective_cpumask(cs, partcmd_update,
2535 } else if (!cpumask_empty(cs->exclusive_cpus)) {
2539 remote_partition_check(cs, trialcs->effective_xcpus,
2544 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2545 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2546 if ((old_prs > 0) && !is_partition_valid(cs))
2547 reset_partition_data(cs);
2551 update_cpumasks_hier(cs, &tmp, hier_flags);
2554 if (cs->partition_root_state)
2555 update_partition_sd_lb(cs, old_prs);
2563 * @cs: the cpuset to consider
2567 * The tasks' cpumask will be updated if cs is a valid partition root.
2569 static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2574 struct cpuset *parent = parent_cs(cs);
2577 int old_prs = cs->partition_root_state;
2586 if (!is_cpu_exclusive(cs))
2591 if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2601 if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
2604 retval = validate_change(cs, trialcs);
2614 cs->prs_err = PERR_INVCPUS;
2617 cs->prs_err = PERR_HKEEPING;
2618 } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2620 cs->prs_err = PERR_NOCPUS;
2623 if (is_remote_partition(cs)) {
2625 remote_partition_disable(cs, &tmp);
2627 remote_cpus_update(cs, trialcs->effective_xcpus,
2630 update_parent_effective_cpumask(cs, partcmd_invalidate,
2633 update_parent_effective_cpumask(cs, partcmd_update,
2640 remote_partition_check(cs, trialcs->effective_xcpus,
2644 cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2645 cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2646 if ((old_prs > 0) && !is_partition_valid(cs))
2647 reset_partition_data(cs);
2655 if (is_partition_valid(cs) || hier_flags)
2656 update_cpumasks_hier(cs, &tmp, hier_flags);
2659 if (cs->partition_root_state)
2660 update_partition_sd_lb(cs, old_prs);
2751 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2753 * Iterate through each task of @cs updating its mems_allowed to the
2757 static void update_tasks_nodemask(struct cpuset *cs)
2763 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2765 guarantee_online_mems(cs, &newmems);
2777 css_task_iter_start(&cs->css, 0, &it);
2788 migrate = is_memory_migrate(cs);
2790 mpol_rebind_mm(mm, &cs->mems_allowed);
2792 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2800 * cs->old_mems_allowed.
2802 cs->old_mems_allowed = newmems;
2810 * @cs: the cpuset to consider
2820 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2826 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2872 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2876 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2885 if (cs == &top_cpuset) {
2910 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2914 retval = validate_change(cs, trialcs);
2921 cs->mems_allowed = trialcs->mems_allowed;
2925 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2941 static int update_relax_domain_level(struct cpuset *cs, s64 val)
2948 if (val != cs->relax_domain_level) {
2949 cs->relax_domain_level = val;
2950 if (!cpumask_empty(cs->cpus_allowed) &&
2951 is_sched_load_balance(cs))
2960 * @cs: the cpuset in which each task's spread flags need to be changed
2962 * Iterate through each task of @cs updating its spread flags. As this
2966 static void update_tasks_flags(struct cpuset *cs)
2971 css_task_iter_start(&cs->css, 0, &it);
2973 cpuset_update_task_spread_flags(cs, task);
2980 * cs: the cpuset to update
2986 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2994 trialcs = alloc_trial_cpuset(cs);
3003 err = validate_change(cs, trialcs);
3007 balance_flag_changed = (is_sched_load_balance(cs) !=
3010 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
3011 || (is_spread_page(cs) != is_spread_page(trialcs)));
3014 cs->flags = trialcs->flags;
3021 update_tasks_flags(cs);
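
update_flag() follows the trial-cpuset pattern used throughout this file: duplicate the cpuset with alloc_trial_cpuset(), apply the change to the copy, run validate_change() against the hierarchy, and only then commit the result back and propagate it to tasks. A simplified sketch of that shape; the validation rule here is a stand-in and the kernel's locking and task updates are omitted:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_cpuset { unsigned long flags; };

#define DEMO_FLAG_LOAD_BALANCE	(1UL << 0)

/* Stand-in validation: reject a cpuset that would end up with no flags. */
static int validate_change_demo(const struct demo_cpuset *cur,
				const struct demo_cpuset *trial)
{
	(void)cur;
	return trial->flags ? 0 : -EINVAL;
}

static int update_flag_demo(struct demo_cpuset *cs, unsigned long bit, int on)
{
	struct demo_cpuset *trial;
	int err;

	/* 1. Work on a heap copy so a failed change leaves *cs untouched. */
	trial = malloc(sizeof(*trial));
	if (!trial)
		return -ENOMEM;
	memcpy(trial, cs, sizeof(*cs));

	if (on)
		trial->flags |= bit;
	else
		trial->flags &= ~bit;

	/* 2. Validate the trial state before touching the real cpuset. */
	err = validate_change_demo(cs, trial);
	if (!err)
		cs->flags = trial->flags;	/* 3. Commit on success. */

	free(trial);
	return err;
}

int main(void)
{
	struct demo_cpuset cs = { .flags = DEMO_FLAG_LOAD_BALANCE };

	printf("clear: %d, flags=0x%lx\n",
	       update_flag_demo(&cs, DEMO_FLAG_LOAD_BALANCE, 0), cs.flags);
	return 0;
}

The real update_flag() additionally notes whether load-balance or spread bits changed so it can rebuild sched domains or update task spread flags afterwards, as the 3007-3021 matches above show; the sketch keeps only the copy/validate/commit skeleton.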
3029 * @cs: the cpuset to update
3035 static int update_prstate(struct cpuset *cs, int new_prs)
3037 int err = PERR_NONE, old_prs = cs->partition_root_state;
3038 struct cpuset *parent = parent_cs(cs);
3058 if ((new_prs > 0) && cpumask_empty(cs->exclusive_cpus)) {
3060 cpumask_and(cs->effective_xcpus,
3061 cs->cpus_allowed, parent->effective_xcpus);
3065 err = update_partition_exclusive(cs, new_prs);
3076 if (cpumask_empty(cs->cpus_allowed)) {
3081 err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
3086 if (err && remote_partition_enable(cs, new_prs, &tmpmask))
3098 if (is_remote_partition(cs))
3099 remote_partition_disable(cs, &tmpmask);
3101 update_parent_effective_cpumask(cs, partcmd_disable,
3116 update_partition_exclusive(cs, new_prs);
3120 cs->partition_root_state = new_prs;
3121 WRITE_ONCE(cs->prs_err, err);
3122 if (!is_partition_valid(cs))
3123 reset_partition_data(cs);
3125 partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
3130 update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
3133 update_partition_sd_lb(cs, old_prs);
3135 notify_partition_change(cs, old_prs);
3249 static int cpuset_can_attach_check(struct cpuset *cs)
3251 if (cpumask_empty(cs->effective_cpus) ||
3252 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
3257 static void reset_migrate_dl_data(struct cpuset *cs)
3259 cs->nr_migrate_dl_tasks = 0;
3260 cs->sum_migrate_dl_bw = 0;
3267 struct cpuset *cs, *oldcs;
3275 cs = css_cs(css);
3280 ret = cpuset_can_attach_check(cs);
3284 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
3285 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3305 cs->nr_migrate_dl_tasks++;
3306 cs->sum_migrate_dl_bw += task->dl.dl_bw;
3310 if (!cs->nr_migrate_dl_tasks)
3313 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
3314 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3317 reset_migrate_dl_data(cs);
3322 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3324 reset_migrate_dl_data(cs);
3334 cs->attach_in_progress++;
3343 struct cpuset *cs;
3346 cs = css_cs(css);
3349 cs->attach_in_progress--;
3350 if (!cs->attach_in_progress)
3353 if (cs->nr_migrate_dl_tasks) {
3354 int cpu = cpumask_any(cs->effective_cpus);
3356 dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3357 reset_migrate_dl_data(cs);
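
cpuset_can_attach() and cpuset_cancel_attach() above pair up as reserve/rollback: can_attach counts the migrating deadline tasks and their summed bandwidth, reserves it on one destination CPU with dl_bw_alloc(), and bumps attach_in_progress; cancel_attach releases the reservation with dl_bw_free() and resets the counters. A sketch of that accumulate-reserve-rollback shape with a made-up single bandwidth pool; the *_demo functions are simplified stand-ins, not the scheduler's API:

#include <errno.h>
#include <stdio.h>

/* Toy deadline-bandwidth pool: total capacity and what is reserved. */
static unsigned long dl_capacity = 100, dl_reserved;

static int dl_bw_alloc_demo(unsigned long bw)
{
	if (dl_reserved + bw > dl_capacity)
		return -EBUSY;
	dl_reserved += bw;
	return 0;
}

static void dl_bw_free_demo(unsigned long bw)
{
	dl_reserved -= bw;
}

struct demo_cpuset {
	int nr_migrate_dl_tasks;
	unsigned long sum_migrate_dl_bw;
	int attach_in_progress;
};

static void reset_migrate_dl_data(struct demo_cpuset *cs)
{
	cs->nr_migrate_dl_tasks = 0;
	cs->sum_migrate_dl_bw = 0;
}

/* can_attach: accumulate the migrating tasks' bandwidth, then reserve it. */
static int can_attach_demo(struct demo_cpuset *cs,
			   const unsigned long *task_bw, int ntasks)
{
	int i, ret;

	for (i = 0; i < ntasks; i++) {
		cs->nr_migrate_dl_tasks++;
		cs->sum_migrate_dl_bw += task_bw[i];
	}

	ret = dl_bw_alloc_demo(cs->sum_migrate_dl_bw);
	if (ret) {
		reset_migrate_dl_data(cs);
		return ret;
	}
	cs->attach_in_progress++;
	return 0;
}

/* cancel_attach: roll the reservation back when the attach is aborted. */
static void cancel_attach_demo(struct demo_cpuset *cs)
{
	cs->attach_in_progress--;
	if (cs->nr_migrate_dl_tasks) {
		dl_bw_free_demo(cs->sum_migrate_dl_bw);
		reset_migrate_dl_data(cs);
	}
}

int main(void)
{
	struct demo_cpuset cs = { 0 };
	unsigned long bw[] = { 30, 40 };

	if (!can_attach_demo(&cs, bw, 2))
		cancel_attach_demo(&cs);	/* pretend the attach was aborted */
	printf("reserved=%lu\n", dl_reserved);	/* back to 0 */
	return 0;
}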
3371 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3375 if (cs != &top_cpuset)
3387 cpuset_update_task_spread_flags(cs, task);
3395 struct cpuset *cs;
3400 cs = css_cs(css);
3404 cpus_updated = !cpumask_equal(cs->effective_cpus,
3406 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3416 cpuset_attach_nodemask_to = cs->effective_mems;
3420 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3423 cpuset_attach_task(cs, task);
3431 cpuset_attach_nodemask_to = cs->effective_mems;
3432 if (!is_memory_migrate(cs) && !mems_updated)
3449 if (is_memory_migrate(cs))
3458 cs->old_mems_allowed = cpuset_attach_nodemask_to;
3460 if (cs->nr_migrate_dl_tasks) {
3461 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3462 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3463 reset_migrate_dl_data(cs);
3466 cs->attach_in_progress--;
3467 if (!cs->attach_in_progress)
3500 struct cpuset *cs = css_cs(css);
3506 if (!is_cpuset_online(cs)) {
3513 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
3516 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
3519 retval = update_flag(CS_MEM_HARDWALL, cs, val);
3522 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
3525 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
3531 retval = update_flag(CS_SPREAD_PAGE, cs, val);
3534 retval = update_flag(CS_SPREAD_SLAB, cs, val);
3549 struct cpuset *cs = css_cs(css);
3555 if (!is_cpuset_online(cs))
3560 retval = update_relax_domain_level(cs, val);
3578 struct cpuset *cs = css_cs(of_css(of));
3585 * CPU or memory hotunplug may leave @cs w/o any execution
3590 * As writes to "cpus" or "mems" may restore @cs's execution
3599 * protection is okay as we check whether @cs is online after
3603 css_get(&cs->css);
3608 if (!is_cpuset_online(cs))
3611 trialcs = alloc_trial_cpuset(cs);
3619 retval = update_cpumask(cs, trialcs, buf);
3622 retval = update_exclusive_cpumask(cs, trialcs, buf);
3625 retval = update_nodemask(cs, trialcs, buf);
3637 css_put(&cs->css);
3652 struct cpuset *cs = css_cs(seq_css(sf));
3660 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3663 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3666 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3669 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3672 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3675 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3693 struct cpuset *cs = css_cs(css);
3697 return is_cpu_exclusive(cs);
3699 return is_mem_exclusive(cs);
3701 return is_mem_hardwall(cs);
3703 return is_sched_load_balance(cs);
3705 return is_memory_migrate(cs);
3709 return fmeter_getrate(&cs->fmeter);
3711 return is_spread_page(cs);
3713 return is_spread_slab(cs);
3724 struct cpuset *cs = css_cs(css);
3728 return cs->relax_domain_level;
3739 struct cpuset *cs = css_cs(seq_css(seq));
3742 switch (cs->partition_root_state) {
3758 err = perr_strings[READ_ONCE(cs->prs_err)];
3771 struct cpuset *cs = css_cs(of_css(of));
3786 css_get(&cs->css);
3789 if (!is_cpuset_online(cs))
3792 retval = update_prstate(cs, val);
3796 css_put(&cs->css);
3997 struct cpuset *cs;
4002 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
4003 if (!cs)
4006 if (alloc_cpumasks(cs, NULL)) {
4007 kfree(cs);
4011 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
4012 nodes_clear(cs->mems_allowed);
4013 nodes_clear(cs->effective_mems);
4014 fmeter_init(&cs->fmeter);
4015 cs->relax_domain_level = -1;
4016 INIT_LIST_HEAD(&cs->remote_sibling);
4020 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
4022 return &cs->css;
4027 struct cpuset *cs = css_cs(css);
4028 struct cpuset *parent = parent_cs(cs);
4038 set_bit(CS_ONLINE, &cs->flags);
4040 set_bit(CS_SPREAD_PAGE, &cs->flags);
4042 set_bit(CS_SPREAD_SLAB, &cs->flags);
4048 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
4049 cs->effective_mems = parent->effective_mems;
4050 cs->use_parent_ecpus = true;
4059 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
4089 cs->mems_allowed = parent->mems_allowed;
4090 cs->effective_mems = parent->mems_allowed;
4091 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
4092 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
4113 struct cpuset *cs = css_cs(css);
4118 if (is_partition_valid(cs))
4119 update_prstate(cs, 0);
4122 is_sched_load_balance(cs))
4123 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
4125 if (cs->use_parent_ecpus) {
4126 struct cpuset *parent = parent_cs(cs);
4128 cs->use_parent_ecpus = false;
4133 clear_bit(CS_ONLINE, &cs->flags);
4141 struct cpuset *cs = css_cs(css);
4143 free_cpuset(cs);
4171 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
4176 same_cs = (cs == task_cs(current));
4186 ret = cpuset_can_attach_check(cs);
4202 cs->attach_in_progress++;
4210 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
4214 same_cs = (cs == task_cs(current));
4221 cs->attach_in_progress--;
4222 if (!cs->attach_in_progress)
4234 struct cpuset *cs;
4238 cs = task_cs(task);
4239 same_cs = (cs == task_cs(current));
4243 if (cs == &top_cpuset)
4253 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
4254 cpuset_attach_task(cs, task);
4256 cs->attach_in_progress--;
4257 if (!cs->attach_in_progress)
4319 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
4327 parent = parent_cs(cs);
4332 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
4334 pr_cont_cgroup_name(cs->css.cgroup);
4344 remove_tasks_in_empty_cpuset(s->cs);
4345 css_put(&s->cs->css);
4350 hotplug_update_tasks_legacy(struct cpuset *cs,
4357 cpumask_copy(cs->cpus_allowed, new_cpus);
4358 cpumask_copy(cs->effective_cpus, new_cpus);
4359 cs->mems_allowed = *new_mems;
4360 cs->effective_mems = *new_mems;
4367 if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
4368 update_tasks_cpumask(cs, new_cpus);
4369 if (mems_updated && !nodes_empty(cs->mems_allowed))
4370 update_tasks_nodemask(cs);
4372 is_empty = cpumask_empty(cs->cpus_allowed) ||
4373 nodes_empty(cs->mems_allowed);
4380 if (is_empty && cs->css.cgroup->nr_populated_csets &&
4381 css_tryget_online(&cs->css)) {
4386 css_put(&cs->css);
4390 s->cs = cs;
4397 hotplug_update_tasks(struct cpuset *cs,
4402 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
4403 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
4405 *new_mems = parent_cs(cs)->effective_mems;
4408 cpumask_copy(cs->effective_cpus, new_cpus);
4409 cs->effective_mems = *new_mems;
4413 update_tasks_cpumask(cs, new_cpus);
4415 update_tasks_nodemask(cs);
4427 * @cs: cpuset in interest
4430 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
4431 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
4434 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
4444 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
4452 if (cs->attach_in_progress) {
4457 parent = parent_cs(cs);
4458 compute_effective_cpumask(&new_cpus, cs, parent);
4459 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
4461 if (!tmp || !cs->partition_root_state)
4468 remote = is_remote_partition(cs);
4469 if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
4470 compute_partition_effective_cpumask(cs, &new_cpus);
4473 partition_is_populated(cs, NULL)) {
4474 remote_partition_disable(cs, tmp);
4475 compute_effective_cpumask(&new_cpus, cs, parent);
4487 if (is_local_partition(cs) && (!is_partition_valid(parent) ||
4488 tasks_nocpu_error(parent, cs, &new_cpus)))
4494 else if (is_partition_valid(parent) && is_partition_invalid(cs))
4498 update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
4499 if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
4500 compute_partition_effective_cpumask(cs, &new_cpus);
4506 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
4507 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
4515 hotplug_update_tasks(cs, &new_cpus, &new_mems,
4518 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
4616 struct cpuset *cs;
4620 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
4621 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
4625 cpuset_hotplug_update_tasks(cs, ptmp);
4628 css_put(&cs->css);
4701 struct cpuset *cs;
4706 cs = task_cs(tsk);
4707 if (cs != &top_cpuset)
4714 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
4761 * But we used cs && cs->cpus_allowed lockless and thus can
4824 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4826 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4827 cs = parent_cs(cs);
4828 return cs;
4873 struct cpuset *cs; /* current cpuset ancestors */
4897 cs = nearest_hardwall_ancestor(task_cs(current));
4898 allowed = node_isset(node, cs->mems_allowed);