Lines Matching refs:cpumask

452 /* PL: user requested unbound cpumask via sysfs */
455 /* PL: isolated cpumask to be excluded from unbound cpumask */
459 static struct cpumask wq_cmdline_cpumask __initdata;
739 * unbound_effective_cpumask - effective cpumask of an unbound workqueue
742 * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
743 * is masked with wq_unbound_cpumask to determine the effective cpumask. The
744 * default pwq is always mapped to the pool with the current effective cpumask.
746 static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
1575 struct cpumask *effective = unbound_effective_cpumask(wq);
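The fragment at lines 739-746 documents unbound_effective_cpumask(): the user-requested mask is ANDed with the global wq_unbound_cpumask to get the mask actually used. A minimal userspace sketch of that masking step, with a cpumask modeled as a plain unsigned long bitmask (effective_mask() and both parameter names are illustrative, not the kernel API):

    #include <stdio.h>

    typedef unsigned long mask_t;      /* one bit per CPU, CPU 0 = bit 0 */

    /* effective mask = user request masked with the global unbound mask */
    static mask_t effective_mask(mask_t user_mask, mask_t unbound_mask)
    {
            return user_mask & unbound_mask;
    }

    int main(void)
    {
            mask_t user = 0xf0;        /* user asked for CPUs 4-7 */
            mask_t unbound = 0x3f;     /* CPUs 0-5 allowed for unbound wqs */
            printf("effective: 0x%lx\n", effective_mask(user, unbound)); /* 0x30 */
            return 0;
    }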
2463 * intersection of cpu_online_mask and the cpumask of the node, unless we
2674 return pool->attrs->cpumask;
4583 free_cpumask_var(attrs->cpumask);
4604 if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
4609 cpumask_copy(attrs->cpumask, cpu_possible_mask);
4621 cpumask_copy(to->cpumask, from->cpumask);
4643 cpumask_copy(attrs->cpumask, cpu_possible_mask);
4656 hash = jhash(cpumask_bits(attrs->cpumask),
4671 if (!a->affn_strict && !cpumask_equal(a->cpumask, b->cpumask))
4682 * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
4685 cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
4686 if (unlikely(cpumask_empty(attrs->cpumask)))
4687 cpumask_copy(attrs->cpumask, unbound_cpumask);
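Lines 4682-4687 show the actualization fallback: if the requested mask does not overlap the unbound mask at all, the whole unbound mask is used instead of an empty one. A hedged sketch of the same pattern (actualize_mask() is a hypothetical stand-in for the kernel helper, not its real signature):

    #include <assert.h>

    typedef unsigned long mask_t;

    static mask_t actualize_mask(mask_t requested, mask_t unbound)
    {
            mask_t m = requested & unbound;

            return m ? m : unbound;    /* never hand back an empty mask */
    }

    int main(void)
    {
            assert(actualize_mask(0x0c, 0x03) == 0x03); /* disjoint: fall back */
            assert(actualize_mask(0x0e, 0x06) == 0x06); /* overlap: intersect */
            return 0;
    }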
5149 * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
5154 * Calculate the cpumask a workqueue with @attrs should use on @pod. If
5158 * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
5159 * and @pod has online CPUs requested by @attrs, the returned cpumask is the
5160 * intersection of the possible CPUs of @pod and @attrs->cpumask.
5162 * The caller is responsible for ensuring that the cpumask of @pod stays stable.
5171 cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
5177 cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
5182 cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
5185 pr_warn_once("WARNING: workqueue cpumask: online intersect > "
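Lines 5149-5185 describe wq_calc_pod_cpumask(): with pod affinity enabled, the result is the intersection of the pod's CPUs and @attrs->cpumask, falling back to the full requested mask when the pod contributes no online CPU serving the request. A simplified sketch under those assumptions (calc_pod_mask() and its parameters are illustrative):

    #include <stdio.h>

    typedef unsigned long mask_t;

    static mask_t calc_pod_mask(mask_t attrs_mask, mask_t pod_cpus, mask_t online)
    {
            mask_t m = attrs_mask & pod_cpus;

            /* no online CPU in this pod serves the request: use full mask */
            if (!(m & online))
                    return attrs_mask;
            return m;
    }

    int main(void)
    {
            /* pod owns CPUs 0-3, request covers CPUs 2-5, CPUs 0-5 online */
            printf("0x%lx\n", calc_pod_mask(0x3c, 0x0f, 0x3f)); /* -> 0xc */
            return 0;
    }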
5256 * the default pwq covering whole @attrs->cpumask. Always create
5261 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
5280 cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
5281 cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
5321 /* rescuer needs to respect wq cpumask changes */
5355 * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
5396 * with a cpumask spanning multiple pods, the workers which were already
5424 /* nothing to do if the target cpumask matches the current pwq */
6102 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
6545 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
6549 * An unbound pool may end up with a cpumask which doesn't have any online
6551 * its cpus_allowed. If @cpu is in @pool's cpumask which didn't have any
6556 static cpumask_t cpumask;
6562 if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
6565 cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
6569 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
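Lines 6545-6569 cover restore_unbound_workers_cpumask(): when an onlining @cpu belongs to a pool's mask, the pool's workers are re-affined to the now non-empty intersection of that mask and cpu_online_mask. A toy analogue (restore_workers_mask() is illustrative; the kernel instead calls set_cpus_allowed_ptr() per worker, as on kernel line 6569):

    #include <stdio.h>

    typedef unsigned long mask_t;

    /* mask the workers should run on, or 0 if @cpu doesn't concern the pool */
    static mask_t restore_workers_mask(mask_t pool_mask, mask_t online, int cpu)
    {
            if (!(pool_mask & (1UL << cpu)))
                    return 0;              /* pool doesn't cover @cpu */
            return pool_mask & online;     /* re-affine to the online subset */
    }

    int main(void)
    {
            /* pool covers CPUs 4-7; CPU 5 has just come online */
            printf("0x%lx\n", restore_workers_mask(0xf0, 0x23, 5)); /* 0x20 */
            return 0;
    }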
6869 * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask
6870 * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask
6878 cpumask_var_t cpumask;
6881 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
6887 /* Save the current isolated cpumask & export it via sysfs */
6894 * by any subsequent write to the workqueue/cpumask sysfs file.
6896 if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask))
6897 cpumask_copy(cpumask, wq_requested_unbound_cpumask);
6898 if (!cpumask_equal(cpumask, wq_unbound_cpumask))
6899 ret = workqueue_apply_unbound_cpumask(cpumask);
6902 free_cpumask_var(cpumask);
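Lines 6869-6902 show workqueue_unbound_exclude_cpumask(): the isolated CPUs are subtracted from the user-requested mask, with a fallback to the full requested mask when everything would be excluded, and the result is applied only when it differs from the current wq_unbound_cpumask. A hedged userspace sketch of that flow (apply_mask() stands in for workqueue_apply_unbound_cpumask()):

    #include <stdio.h>

    typedef unsigned long mask_t;

    static mask_t current_unbound = 0xff;

    static void apply_mask(mask_t m)
    {
            current_unbound = m;
            printf("applied 0x%lx\n", m);
    }

    static void exclude_from_unbound(mask_t requested, mask_t exclude)
    {
            mask_t m = requested & ~exclude;

            if (!m)                        /* everything excluded: keep request */
                    m = requested;
            if (m != current_unbound)      /* skip a no-op reapply */
                    apply_mask(m);
    }

    int main(void)
    {
            exclude_from_unbound(0xff, 0x0f); /* applies 0xf0 */
            exclude_from_unbound(0xf0, 0xf0); /* falls back, no change */
            return 0;
    }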
6969 * cpumask RW mask : bitmask of allowed CPUs for the workers
7098 cpumask_pr_args(wq->unbound_attrs->cpumask));
7117 ret = cpumask_parse(buf, attrs->cpumask);
7202 __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
7214 * workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
7215 * @cpumask: the cpumask to set
7217 * The low-level workqueues cpumask is a global cpumask that limits
7218 * the affinity of all unbound workqueues. This function checks the @cpumask
7222 * -EINVAL - Invalid @cpumask
7225 static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
7233 cpumask_and(cpumask, cpumask, cpu_possible_mask);
7234 if (!cpumask_empty(cpumask)) {
7236 cpumask_copy(wq_requested_unbound_cpumask, cpumask);
7237 if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
7242 ret = workqueue_apply_unbound_cpumask(cpumask);
7286 cpumask_var_t cpumask;
7289 if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
7292 ret = cpumask_parse(buf, cpumask);
7294 ret = workqueue_set_unbound_cpumask(cpumask);
7296 free_cpumask_var(cpumask);
7299 static DEVICE_ATTR_RW(cpumask);
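Lines 7214-7299 tie the sysfs cpumask attribute to workqueue_set_unbound_cpumask(), which clamps the written mask to cpu_possible_mask and rejects an empty result with -EINVAL. A minimal sketch of that validation (plain strtoul() parsing stands in for cpumask_parse(), and POSSIBLE_MASK for cpu_possible_mask):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long mask_t;

    #define POSSIBLE_MASK 0x0fUL           /* pretend only CPUs 0-3 exist */

    static int set_unbound_mask(const char *buf)
    {
            mask_t m = strtoul(buf, NULL, 16) & POSSIBLE_MASK;

            if (!m)
                    return -EINVAL;        /* no usable CPU requested */
            printf("unbound mask now 0x%lx\n", m);
            return 0;
    }

    int main(void)
    {
            set_unbound_mask("3c");        /* clamped to 0xc */
            set_unbound_mask("f0");        /* outside possible: -EINVAL */
            return 0;
    }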
7629 static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
7644 cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));