• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/mm/

Lines Matching refs:policy

4    fix mmap readahead to honour policy and enable policy for any page cache
7 global policy for page cache? currently it uses process policy. Requires
9 handle mremap for shared memory (currently ignored for the policy)
11 make bind policy root only? It can trigger oom much faster and the
59 * run-time system-wide default policy => local allocation
147 * any, for the new policy. mpol_new() has already validated the nodes
148 * parameter with respect to the policy mode and flags. But, we need to
190 * This function just creates a new policy, does some check and simple
196 struct mempolicy *policy;
204 return NULL; /* simply delete any existing policy */
221 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
222 if (!policy)
224 atomic_set(&policy->refcnt, 1);
225 policy->mode = mode;
226 policy->flags = flags;
228 return policy;
320 * mpol_rebind_policy - Migrate a policy to a different set of nodes
559 /* Apply policy to a single VMA */
580 /* Step 2: apply policy to a range and do splits. */
658 /* Set the process memory policy */
710 * Return nodemask for policy for get_mempolicy() query
749 /* Retrieve NUMA policy */
750 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
765 *policy = 0; /* just so it's initialized */
774 * Do NOT fall back to task policy if the
775 * vma/shared policy at addr is NULL. We
799 *policy = err;
802 *policy = current->il_next;
808 *policy = pol == &default_policy ? MPOL_DEFAULT :
812 * the policy to userspace.
814 *policy |= (pol->flags & MPOL_MODE_FLAGS);
978 * Allocate a new page for page migration based on vma policy.
997 * if !vma, alloc_page_vma() will use task or system default policy
1056 * If we are using the default policy then operation
1200 /* Set the process memory policy */
1306 /* Retrieve NUMA policy */
1307 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1323 if (policy && put_user(pval, policy))
1334 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1350 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1413 * @task - task for fallback if vma policy == default
1414 * @vma - virtual memory area whose policy is sought
1415 * @addr - address in @vma for shared policy lookup
1417 * Returns effective policy for a VMA at specified address.
1418 * Falls back to @task or system default policy, as necessary.
1450 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1453 if (unlikely(policy->mode == MPOL_BIND) &&
1455 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1456 return &policy->v.nodes;
1462 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
1466 switch (policy->mode) {
1468 if (!(policy->flags & MPOL_F_LOCAL))
1469 nd = policy->v.preferred_node;
1479 unlikely(!node_isset(nd, policy->v.nodes)))
1480 nd = first_node(policy->v.nodes);
1489 static unsigned interleave_nodes(struct mempolicy *policy)
1495 next = next_node(nid, policy->v.nodes);
1497 next = first_node(policy->v.nodes);
1504 * Depending on the memory policy provide a node from which to allocate the
1506 * @policy must be protected by freeing by the caller. If @policy is
1508 * task can change it's policy. The system default policy requires no
1511 unsigned slab_node(struct mempolicy *policy)
1513 if (!policy || policy->flags & MPOL_F_LOCAL)
1516 switch (policy->mode) {
1521 return policy->v.preferred_node;
1524 return interleave_nodes(policy);
1528 * Follow bind policy behavior and start allocation at the
1536 &policy->v.nodes,
1591 * @vma = virtual memory area whose policy is sought
1592 * @addr = address in @vma for shared policy lookup and interleave policy
1599 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1628 * to indicate default policy. Otherwise, extract the policy nodemask
1629 * for 'bind' or 'interleave' policy into the argument nodemask, or
1631 * 'preferred' or 'local' policy and return 'true' to indicate presence
1678 * policy. Otherwise, check for intersection between mask and the policy
1679 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1680 * policy, always return true since it may allocate elsewhere on fallback.
1718 /* Allocate a page in interleaved policy.
1747 * a NUMA policy associated with the VMA or the current process.
1775 * slow path: ref counted shared policy
1784 * fast path: default or task policy
1803 * interrupt context and apply the current process NUMA policy.
1880 * policy lookup, even if the policy needs/has extra ref on lookup.
1923 * Shared memory backing store policy support.
1963 /* Insert a new shared policy into the list. */
1984 new->policy ? new->policy->mode : 0);
1987 /* Find shared policy intersecting idx */
1999 mpol_get(sn->policy);
2000 pol = sn->policy;
2010 mpol_put(n->policy);
2025 n->policy = pol;
2029 /* Replace a policy range. */
2047 /* Old policy spanning whole new range. */
2051 new2 = sp_alloc(end, n->end, n->policy);
2071 mpol_put(new2->policy);
2078 * mpol_shared_policy_init - initialize shared policy for inode
2079 * @sp: pointer to inode shared policy
2082 * Install non-NULL @mpol in inode's shared policy rb-tree.
2112 /* Create pseudo-vma that contains just the policy */
2114 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2150 /* Free a backing policy store on inode delete. */
2164 mpol_put(n->policy);
2186 * Set interleaving policy for system init. Interleaving is only
2213 /* Reset policy of current process to default */
2224 * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
2418 mode = MPOL_LOCAL; /* pseudo-policy */
2542 * Display pages allocated per node and memory policy via /proc.