Lines Matching refs:mems_allowed

121 	nodemask_t mems_allowed;
149 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
153 * cpuset.mems_allowed and have tasks' nodemask updated, and
154 * then old_mems_allowed is updated to mems_allowed.
162 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
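Taken together, hits 121 and 149-162 imply three nodemasks on struct cpuset. A minimal sketch of just those fields (the omitted members and their ordering are assumptions):

	struct cpuset {
		/* ... */
		nodemask_t mems_allowed;	/* user-requested memory nodes (121) */
		nodemask_t effective_mems;	/* nodes actually granted; see 2836 */
		nodemask_t old_mems_allowed;	/* what tasks were last rebound to,
						 * used after hotplug (149-154) */
		/* ... */
	};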
424 * Now, the task_struct fields mems_allowed and mempolicy may be changed
563 * Return in *pmask the portion of a cpuset's mems_allowed that
614 nodes_subset(p->mems_allowed, q->mems_allowed) &&
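Hit 614 is the mems half of the subset test between two cpusets. A plausible reconstruction of the surrounding helper; the cpumask and exclusivity clauses are assumed by analogy:

	static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
	{
		return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
			nodes_subset(p->mems_allowed, q->mems_allowed) &&
			is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
			is_mem_exclusive(p) <= is_mem_exclusive(q);
	}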
786 * perhaps one or more of the fields cpus_allowed, mems_allowed,
813 * be changed to have empty cpus_allowed or mems_allowed.
820 if (!nodes_empty(cur->mems_allowed) &&
821 nodes_empty(trial->mems_allowed))
848 nodes_intersects(trial->mems_allowed, c->mems_allowed))
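Hits 786-848 belong to the trial-cpuset validation: a modified copy is vetted before anything is committed. A hedged reconstruction of the two mems checks; the error codes and the sibling loop variable c are assumptions:

	/* refuse to empty the mems of a cpuset that has tasks (820-821) */
	if (!nodes_empty(cur->mems_allowed) &&
	    nodes_empty(trial->mems_allowed))
		return -ENOSPC;

	/* a mem-exclusive cpuset may not overlap any sibling c (848) */
	if (is_mem_exclusive(trial) &&
	    nodes_intersects(trial->mems_allowed, c->mems_allowed))
		return -EINVAL;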
2693 /* on a wq worker, no need to worry about %current's mems_allowed */
2727 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2731 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2744 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2746 tsk->mems_allowed = *newmems;
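Hits 2727-2746 describe the task update protocol: widen the task's mask to old|new first, so a concurrent allocator never observes an empty mask, then shrink it to the final value, all inside the mems_allowed_seq write section. A sketch, with the exact locking assumed:

	static void cpuset_change_task_nodemask(struct task_struct *tsk,
						nodemask_t *newmems)
	{
		task_lock(tsk);
		local_irq_disable();
		write_seqcount_begin(&tsk->mems_allowed_seq);

		nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); /* 2744 */
		mpol_rebind_task(tsk, newmems);
		tsk->mems_allowed = *newmems;				   /* 2746 */

		write_seqcount_end(&tsk->mems_allowed_seq);
		local_irq_enable();
		task_unlock(tsk);
	}

Readers of tsk->mems_allowed retry against the same seqcount, which is what makes the widen-then-shrink order safe to observe at any point.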
2758 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2760 * Iterate through each task of @cs updating its mems_allowed to the
2797 mpol_rebind_mm(mm, &cs->mems_allowed);
2811 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2823 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2836 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2860 !nodes_equal(cp->mems_allowed, cp->effective_mems));
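Hits 2758-2860 outline the hierarchy propagation: each descendant's effective mask is its own request ANDed with what its parent actually has, after which the tasks' nodemasks and vma mempolicies are rebound. A rough sketch; the walk macro arguments and locking are assumptions:

	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);
		nodemask_t new_mems;

		nodes_and(new_mems, cp->mems_allowed, parent->effective_mems); /* 2836 */

		/* ... commit cp->effective_mems under callback_lock, then
		 * update_tasks_nodemask(cp), which ends with
		 * mpol_rebind_mm(mm, ...) per task (2797, 2811) ... */
	}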
2873 * cpuset's mems_allowed, and for each task in the cpuset,
2874 * update mems_allowed and rebind task's mempolicy and any vma
2881 * their mempolicies to the cpuset's new mems_allowed.
2889 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2898 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2904 nodes_clear(trialcs->mems_allowed);
2906 retval = nodelist_parse(buf, trialcs->mems_allowed);
2910 if (!nodes_subset(trialcs->mems_allowed,
2911 top_cpuset.mems_allowed)) {
2917 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2925 check_insane_mems_config(&trialcs->mems_allowed);
2928 cs->mems_allowed = trialcs->mems_allowed;
2931 /* use trialcs->mems_allowed as a temp variable */
2932 update_nodemasks_hier(cs, &trialcs->mems_allowed);
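Hits 2889-2932 are the cpuset.mems write handler. A hedged reconstruction of its control flow; the labels and the empty-buffer rule at 2898 are inferred from context:

	if (!*buf) {
		nodes_clear(trialcs->mems_allowed);	/* ok iff no tasks (2898, 2904) */
	} else {
		retval = nodelist_parse(buf, trialcs->mems_allowed);
		if (retval < 0)
			goto done;
		/* must stay within the root cpuset's nodes (2910-2911) */
		if (!nodes_subset(trialcs->mems_allowed,
				  top_cpuset.mems_allowed)) {
			retval = -EINVAL;
			goto done;
		}
	}

	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
		retval = 0;				/* no change (2917) */
		goto done;
	}

	check_insane_mems_config(&trialcs->mems_allowed);
	cs->mems_allowed = trialcs->mems_allowed;	/* commit (2928) */
	update_nodemasks_hier(cs, &trialcs->mems_allowed);	/* 2932 */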
3252 * For v1, cpus_allowed and mems_allowed can't be empty.
3259 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
3339 * changes which zero cpus/mems_allowed.
3449 * old_mems_allowed is the same as mems_allowed
3452 * @mems_allowed has been updated and is empty, so
3671 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
4023 nodes_clear(cs->mems_allowed);
4105 cs->mems_allowed = parent->mems_allowed;
4106 cs->effective_mems = parent->mems_allowed;
4170 top_cpuset.mems_allowed = node_possible_map;
4174 top_cpuset.mems_allowed = top_cpuset.effective_mems;
4216 * changes which zero cpus/mems_allowed.
4263 task->mems_allowed = current->mems_allowed;
4314 nodes_setall(top_cpuset.mems_allowed);
4347 nodes_empty(parent->mems_allowed))
4367 cs->mems_allowed = *new_mems;
4377 if (mems_updated && !nodes_empty(cs->mems_allowed))
4381 nodes_empty(cs->mems_allowed);
4482 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
4635 /* synchronize mems_allowed to N_MEMORY */
4639 top_cpuset.mems_allowed = new_mems;
4691 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
4710 * cpus_allowed/mems_allowed set to v2 values in the initial
4714 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
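Hits 4635-4714 cover the hotplug side: the root cpuset is periodically resynced to whichever nodes currently have memory, and old_mems_allowed records what tasks were last rebound to. A minimal sketch of the resync; locking is omitted and new_mems being local is an assumption:

	nodemask_t new_mems = node_states[N_MEMORY];	/* 4635 */

	top_cpuset.mems_allowed = new_mems;		/* 4639 */
	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;	/* cf. 4714 */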
4818 nodes_setall(current->mems_allowed);
4822 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
4823 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4825 * Description: Returns the nodemask_t mems_allowed of the cpuset
4846 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4849 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4853 return nodes_intersects(*nodemask, current->mems_allowed);
4875 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4889 * current task's mems_allowed came up empty on the first pass over
4917 if (node_isset(node, current->mems_allowed))
4931 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4936 allowed = node_isset(node, cs->mems_allowed);
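Hits 4875-4936 describe the allocation-time check: the task's own mask decides the common case, and a request without __GFP_HARDWALL that misses may still be satisfied by the nearest hardwalled ancestor's mask. A condensed sketch; the surrounding special cases (interrupt context, OOM victims) are elided and assumed:

	if (node_isset(node, current->mems_allowed))	/* fast path (4917) */
		return true;
	if (gfp_mask & __GFP_HARDWALL)			/* no fallback allowed */
		return false;

	/* not hardwall and node outside mems_allowed: scan up (4931) */
	cs = nearest_hardwall_ancestor(task_cs(current));
	allowed = node_isset(node, cs->mems_allowed);	/* 4936 */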
4954 * node around the task's mems_allowed nodes.
4960 * only set nodes in task->mems_allowed that are online. So it
4971 return *rotor = next_node_in(*rotor, current->mems_allowed);
4981 node_random(&current->mems_allowed);
4993 node_random(&current->mems_allowed);
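Hits 4954-4993 are the spread-node helpers: a per-task rotor walks mems_allowed round-robin, and node_random() seeds it on first use. A sketch assuming the rotor lives on task_struct as cpuset_mem_spread_rotor:

	static int cpuset_spread_node(int *rotor)
	{
		/* advance round-robin within the allowed nodes (4971) */
		return *rotor = next_node_in(*rotor, current->mems_allowed);
	}

	/* first call: seed with a random allowed node (4981, 4993) */
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);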
5000 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
5004 * Description: Return true if @tsk1's mems_allowed intersects the
5005 * mems_allowed of @tsk2. Used by the OOM killer to determine if
5013 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
5017 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
5020 * mems_allowed to the kernel log.
5031 pr_cont(",mems_allowed=%*pbl",
5032 nodemask_pr_args(&current->mems_allowed));
5110 /* Display task mems_allowed in /proc/<pid>/status file. */
5114 nodemask_pr_args(&task->mems_allowed));
5116 nodemask_pr_args(&task->mems_allowed));
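Hits 5110-5116 print the mask into /proc/<pid>/status twice, once as a hex bitmap and once as a node list, via the %*pb and %*pbl printf extensions paired with nodemask_pr_args(). Reconstructed, with the seq_file handle name assumed:

	seq_printf(m, "Mems_allowed:\t%*pb\n",
		   nodemask_pr_args(&task->mems_allowed));
	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
		   nodemask_pr_args(&task->mems_allowed));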