Lines matching refs: cp (kernel/cgroup/cpuset.c)

870           struct cpuset *cp;
874           cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
875                   /* skip the whole subtree if @cp doesn't have any CPU */
876                   if (cpumask_empty(cp->cpus_allowed)) {
881                   if (is_sched_load_balance(cp))
882                           update_domain_attr(dattr, cp);
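The matches at 870-882 are the body of update_domain_attr_tree(): a pre-order walk that prunes any subtree whose root cpuset has no CPUs, and folds each load-balancing cpuset's relax_domain_level into dattr. A condensed sketch of the walk, reconstructed from the listed lines; the rcu_read_lock() bracketing and the css_rightmost_descendant() pruning are assumptions based on how these walks are written elsewhere in cpuset.c:

    rcu_read_lock();
    cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
            /* skip the whole subtree if @cp doesn't have any CPU */
            if (cpumask_empty(cp->cpus_allowed)) {
                    pos_css = css_rightmost_descendant(pos_css);
                    continue;
            }

            if (is_sched_load_balance(cp))
                    update_domain_attr(dattr, cp);
    }
    rcu_read_unlock();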
916    * cp - cpuset pointer, used (together with pos_css) to perform a
       *      top-down scan of all cpusets ...
950           struct cpuset *cp;      /* top-down scan of cpusets */
983           csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
991           cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
992                   if (cp == &top_cpuset)
995                    * Continue traversing beyond @cp iff @cp has some CPUs and
1002                   * If root is load-balancing, we can skip @cp if it
1005                  if (!cpumask_empty(cp->cpus_allowed) &&
1006                      !(is_sched_load_balance(cp) &&
1007                        cpumask_intersects(cp->cpus_allowed,
1012                      cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
1015                  if (is_sched_load_balance(cp) &&
1016                      !cpumask_empty(cp->effective_cpus))
1017                          csa[csn++] = cp;
1019                  /* skip @cp's subtree if not a partition root */
1020                  if (!is_partition_valid(cp))
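Lines 950-1020 come from generate_sched_domains(): cp drives a top-down scan that collects every load-balancing cpuset with a non-empty effective_cpus into the csa[] candidate array, and stops descending once cp is known not to be a valid partition root. A minimal sketch of the collection loop with the early-continue filters elided; the done label and the css_rightmost_descendant() pruning are assumptions consistent with the comment at 1019:

    csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
    if (!csa)
            goto done;              /* hypothetical cleanup label */
    csn = 0;

    rcu_read_lock();
    cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
            if (cp == &top_cpuset)
                    continue;

            /* candidate: a load-balancing cpuset with CPUs of its own */
            if (is_sched_load_balance(cp) &&
                !cpumask_empty(cp->effective_cpus))
                    csa[csn++] = cp;

            /* skip @cp's subtree if not a partition root */
            if (!is_partition_valid(cp))
                    pos_css = css_rightmost_descendant(pos_css);
    }
    rcu_read_unlock();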
2116           * 2) All the effective_cpus will be used up and cp
2177          struct cpuset *cp;
2183          cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2184                  struct cpuset *parent = parent_cs(cp);
2185                  bool remote = is_remote_partition(cp);
2192                  if (remote && (cp != cs)) {
2201                  if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) {
2203                          compute_effective_exclusive_cpumask(cp, NULL);
2207                  old_prs = new_prs = cp->partition_root_state;
2209                                 is_partition_valid(cp)))
2210                          compute_partition_effective_cpumask(cp, tmp->new_cpus);
2212                          compute_effective_cpumask(tmp->new_cpus, cp, parent);
2219                  if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2232                          if (!cp->use_parent_ecpus) {
2233                                  cp->use_parent_ecpus = true;
2236                  } else if (cp->use_parent_ecpus) {
2237                          cp->use_parent_ecpus = false;
2252                  if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
2253                      cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2255                      (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2267                  if ((cp != cs) && old_prs) {
2280                                  if (is_partition_valid(cp))
2281                                          new_prs = -cp->partition_root_state;
2282                                  WRITE_ONCE(cp->prs_err,
2289                  if (!css_tryget_online(&cp->css))
2294                          update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2299                          new_prs = cp->partition_root_state;
2303                  cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2304                  cp->partition_root_state = new_prs;
2309                  if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus))
2310                          cpumask_and(cp->effective_xcpus,
2311                                      cp->cpus_allowed, parent->effective_xcpus);
2313                          reset_partition_data(cp);
2316                  notify_partition_change(cp, old_prs);
2319                          !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2321                  update_tasks_cpumask(cp, cp->effective_cpus);
2329                      !is_partition_valid(cp) &&
2330                      (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2332                                  set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2334                                  clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2343                  if (!cpumask_empty(cp->cpus_allowed) &&
2344                      is_sched_load_balance(cp) &&
2346                      is_partition_valid(cp)))
2350                  css_put(&cp->css);
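Lines 2177-2350 are the heart of update_cpumasks_hier(). The recurring shape matters: the walk runs under rcu_read_lock(), but updating a cpuset can sleep, so each visited cp is pinned with css_tryget_online(), RCU is dropped for the update, then re-taken before css_put(). A stripped-down sketch of that pattern with all the partition-state handling elided; publishing the mask under callback_lock is an assumption based on how effective masks are updated elsewhere in the file:

    rcu_read_lock();
    cpuset_for_each_descendant_pre(cp, pos_css, cs) {
            struct cpuset *parent = parent_cs(cp);

            compute_effective_cpumask(tmp->new_cpus, cp, parent);

            /* pin @cp so it cannot go away once RCU is dropped */
            if (!css_tryget_online(&cp->css))
                    continue;
            rcu_read_unlock();

            /* publish the new mask, then rebind @cp's tasks to it */
            spin_lock_irq(&callback_lock);
            cpumask_copy(cp->effective_cpus, tmp->new_cpus);
            spin_unlock_irq(&callback_lock);
            update_tasks_cpumask(cp, cp->effective_cpus);

            rcu_read_lock();
            css_put(&cp->css);
    }
    rcu_read_unlock();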
2489                  struct cpuset *cp;
2500                  cpuset_for_each_child(cp, css, parent) {
2503                          if (is_partition_valid(cp) &&
2504                              cpumask_intersects(xcpus, cp->effective_xcpus)) {
2506                                  update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp);
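Lines 2489-2506 scan only the immediate children of parent: any sibling that is still a valid partition but whose effective_xcpus now overlaps the requested exclusive CPUs gets invalidated through partcmd_invalidate. A sketch of that shape; the rcu_read_unlock()/rcu_read_lock() pair around the call is an assumption mirroring the pattern above, since update_parent_effective_cpumask() may sleep:

    rcu_read_lock();
    cpuset_for_each_child(cp, css, parent) {
            if (is_partition_valid(cp) &&
                cpumask_intersects(xcpus, cp->effective_xcpus)) {
                    rcu_read_unlock();
                    update_parent_effective_cpumask(cp, partcmd_invalidate,
                                                    NULL, &tmp);
                    rcu_read_lock();
            }
    }
    rcu_read_unlock();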
2822          struct cpuset *cp;
2826          cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2827                  struct cpuset *parent = parent_cs(cp);
2829                  nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2839                  if (nodes_equal(*new_mems, cp->effective_mems)) {
2844                  if (!css_tryget_online(&cp->css))
2849                  cp->effective_mems = *new_mems;
2853                          !nodes_equal(cp->mems_allowed, cp->effective_mems));
2855                  update_tasks_nodemask(cp);
2858                  css_put(&cp->css);
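Lines 2822-2858 are the memory-side twin, update_nodemasks_hier(): the same pre-order walk and css_tryget_online()/css_put() dance, but the new effective_mems is the intersection of cp->mems_allowed with the parent's effective_mems, and the subtree is pruned when nothing changes. A condensed sketch under the same assumptions as the cpumask version above:

    rcu_read_lock();
    cpuset_for_each_descendant_pre(cp, pos_css, cs) {
            struct cpuset *parent = parent_cs(cp);

            nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);

            /* skip the whole subtree if @cp's effective mems are unchanged */
            if (nodes_equal(*new_mems, cp->effective_mems)) {
                    pos_css = css_rightmost_descendant(pos_css);
                    continue;
            }

            if (!css_tryget_online(&cp->css))
                    continue;
            rcu_read_unlock();

            spin_lock_irq(&callback_lock);
            cp->effective_mems = *new_mems;
            spin_unlock_irq(&callback_lock);
            update_tasks_nodemask(cp);

            rcu_read_lock();
            css_put(&cp->css);
    }
    rcu_read_unlock();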