• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/kernel/

Lines Matching refs:cpuset

2  *  kernel/cpuset.c
23 #include <linux/cpuset.h>
60 * When there is only one cpuset (the root cpuset) we can
74 struct cpuset {
76 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
82 atomic_t count; /* count tasks using this cpuset */
91 struct cpuset *parent; /* my parent */
92 struct dentry *dentry; /* cpuset fs entry */
96 * recent time this cpuset changed its mems_allowed.
103 /* bits in struct cpuset flags field */
115 static inline int is_cpu_exclusive(const struct cpuset *cs)
120 static inline int is_mem_exclusive(const struct cpuset *cs)
125 static inline int is_removed(const struct cpuset *cs)
130 static inline int notify_on_release(const struct cpuset *cs)
135 static inline int is_memory_migrate(const struct cpuset *cs)
140 static inline int is_spread_page(const struct cpuset *cs)
145 static inline int is_spread_slab(const struct cpuset *cs)
151 * Increment this integer every time any cpuset changes its
154 * the cpuset they're using changes generation.
157 * reattach a task to a different cpuset, which must not have its
158 * generation numbers aliased with those of that tasks previous cpuset.
163 * its current->cpuset->mems_allowed has changed, requiring an update
171 static struct cpuset top_cpuset = {
184 * We have two global cpuset mutexes below. They can nest.
186 * require taking task_lock() when dereferencing a tasks cpuset pointer.
193 * the cpuset structure first, knowing nothing will change. It can
201 * from one of the callbacks into the cpuset code from within
216 * to that cpuset can fork (the other way to increment the count).
219 * holds manage_mutex or callback_mutex on a cpuset with zero count, it
220 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
224 * the cpuset hierarchy holds manage_mutex across the entire operation,
225 * single threading all such cpuset modifications across the system.
234 * when a task in a notify_on_release cpuset exits. Then manage_mutex
235 * is taken, and if the cpuset count is zero, a usermode call made
236 * to /sbin/cpuset_release_agent with the name of the cpuset (path
237 * relative to the root of cpuset file system) as the argument.
239 * A cpuset can only be deleted if both its 'count' of using tasks
241 * tasks in the system use _some_ cpuset, and since there is always at
251 * which overwrites one tasks cpuset pointer with another. It does
253 * critical places that need to reference task->cpuset without the
256 * a tasks cpuset pointer we use task_lock(), which acts on a spinlock
261 * update of a tasks cpuset pointer by attach_task() and the
262 * access of task->cpuset->mems_generation via that pointer in
300 /* is dentry a directory ? if so, kfree() associated cpuset */
302 struct cpuset *cs = dentry->d_fsdata;
401 .name = "cpuset",
408 * The files in the cpuset filesystem mostly have a very simple read/write
415 * - the cpuset to use in file->f_path.dentry->d_parent->d_fsdata
430 static inline struct cpuset *__d_cs(struct dentry *dentry)
441 * Call with manage_mutex held. Writes path of cpuset into buf.
445 static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
471 * Notify userspace when a cpuset is released, by running
472 * /sbin/cpuset_release_agent with the name of the cpuset (path
473 * relative to the root of cpuset file system) as the argument.
475 * Most likely, this user command will try to rmdir this cpuset.
478 * attached to this cpuset before it is removed, or that some other
479 * user task will 'mkdir' a child cpuset of this cpuset. That's ok.
480 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
481 * unused, and this cpuset will be reprieved from its death sentence,
493 * When we had only one cpuset mutex, we had to call this
527 * the list of children is empty, prepare cpuset path in a kmalloc'd
541 static void check_for_release(struct cpuset *cs, char **ppathbuf)
559 * are online. If none are online, walk up the cpuset hierarchy
571 static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
584 * are online. If none are online, walk up the cpuset hierarchy
595 static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
614 * current->cpuset if a task has its memory placement changed.
619 * 'the_top_cpuset_hack', the tasks cpuset pointer will never
623 * Reading current->cpuset->mems_generation doesn't need task_lock
624 * to guard the current->cpuset dereference, because it is guarded
625 * from concurrent freeing of current->cpuset by attach_task(),
629 * as I don't actually mind if I see a new cpuset pointer but
635 * avoiding the rcu critical section for tasks in the root cpuset
645 * task has been modifying its cpuset.
652 struct cpuset *cs;
654 if (tsk->cpuset == &top_cpuset) {
659 cs = rcu_dereference(tsk->cpuset);
667 cs = tsk->cpuset; /* Maybe changed when task not locked */
685 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
687 * One cpuset is a subset of another if all its allowed CPUs and
692 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
701 * validate_change() - Used to validate that any proposed cpuset change
704 * If we replaced the flag and mask values of the current cpuset
705 * (cur) with those values in the trial cpuset (trial), would
709 * 'cur' is the address of an actual, in-use cpuset. Operations
711 * cpuset in the list must use cur below, not trial.
720 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
722 struct cpuset *c, *par;
730 /* Remaining checks don't apply to root cpuset */
736 /* We must be a subset of our parent cpuset */
756 * For a given cpuset cur, partition the system as follows
757 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
759 * b. All cpus in the current cpuset's cpus_allowed that are not part of any
769 static void update_cpu_domains(struct cpuset *cur)
771 struct cpuset *c, *par = cur->parent;
796 * Get all cpus from current cpuset's cpus_allowed not part
814 static int update_cpumask(struct cpuset *cs, char *buf)
816 struct cpuset trialcs;
826 * We allow a cpuset's cpus_allowed to be empty; if it has attached
838 /* cpus_allowed cannot be empty for a cpuset with attached tasks. */
861 * Call holding manage_mutex, so our current->cpuset won't change
865 * our tasks cpuset.
877 * sync with our tasks cpuset, and in particular, callbacks to
879 * won't see any mismatch of our cpuset and task mems_generation
898 guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
904 * of a cpuset. Needs to validate the request, update the
906 * task in the cpuset, rebind any vma mempolicies and if
907 * the cpuset is marked 'memory_migrate', migrate the tasks
911 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
916 static int update_nodemask(struct cpuset *cs, char *buf)
918 struct cpuset trialcs;
934 * We allow a cpuset's mems_allowed to be empty; if it has attached
951 /* mems_allowed cannot be empty for a cpuset with attached tasks. */
973 * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
993 /* Load up mmarray[] with mm reference for each task in cpuset. */
1002 if (p->cpuset != cs)
1014 * new cpuset, and release that mm. The mpol_rebind_mm()
1019 * cpuset manage_mutex, we know that no other rebind effort will
1046 static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
1060 * cs: the cpuset to update
1066 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
1069 struct cpuset trialcs;
1193 * Attach task specified by pid in 'pidbuf' to cpuset 'cs', possibly
1194 * writing the path of the old cpuset in 'ppathbuf' if it needs to be
1201 static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
1205 struct cpuset *oldcs;
1247 oldcs = tsk->cpuset;
1249 * After getting 'oldcs' cpuset ptr, be sure still not exiting.
1260 rcu_assign_pointer(tsk->cpuset, cs);
1286 /* The various types of files and directories in a cpuset file system */
1308 struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
1416 static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1427 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1442 struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
1602 * cs: the cpuset we create the directory for.
1605 * name: The name to give to the cpuset directory. Will be copied.
1609 static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
1651 * Reading this file can return large amounts of data if a cpuset has
1671 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
1673 * when reading out p->cpuset, as we don't really care if it changes
1676 static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
1684 if (p->cpuset == cs) {
1718 * process id's of tasks currently attached to the cpuset being opened.
1720 * Does not require any specific cpuset mutexes, and does not take any.
1724 struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
1738 * If cpuset gets more users after we read count, we won't have
1740 * caller from the case that the additional cpuset users didn't
1880 * cpuset_create - create a cpuset
1881 * parent: cpuset that will be parent of the new cpuset.
1882 * name: name of the new cpuset. Will be strcpy'ed.
1888 static long cpuset_create(struct cpuset *parent, const char *name, int mode)
1890 struct cpuset *cs;
1944 struct cpuset *c_parent = dentry->d_parent->d_fsdata;
1953 * If the cpuset being removed is marked cpu_exclusive, then simulate
1963 struct cpuset *cs = dentry->d_fsdata;
1965 struct cpuset *parent;
2016 tsk->cpuset = &top_cpuset;
2017 tsk->cpuset->mems_generation = cpuset_mems_generation++;
2024 * Description: Initialize top_cpuset and the cpuset internal file system,
2038 init_task.cpuset = &top_cpuset;
2045 printk(KERN_ERR "cpuset: could not mount!\n");
2057 /* memory_pressure_enabled is in root cpuset only */
2066 * or memory nodes, we need to walk over the cpuset hierarchy,
2068 * last CPU or node from a cpuset, then the guarantee_online_cpus()
2074 * It will check all cpusets in a subtree even if the top cpuset of
2082 * Recursive, on depth of cpuset subtree.
2085 static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
2087 struct cpuset *c;
2101 * cpu_online_map and node_online_map. Force the top cpuset to track
2104 * To ensure that we don't remove a CPU or node from the top cpuset
2105 * that is currently in use by a child cpuset (which would violate
2161 * Description: Finish top cpuset after cpu, node maps are initialized
2173 * cpuset_fork - attach newly forked task to its parents cpuset.
2176 * Description: A task inherits its parent's cpuset at fork().
2178 * A pointer to the shared cpuset was automatically copied in fork.c
2181 * a valid cpuset pointer. attach_task() might have already changed
2182 * current->cpuset, allowing the previously referenced cpuset to
2184 * its present value of current->cpuset for our freshly forked child.
2193 child->cpuset = current->cpuset;
2194 atomic_inc(&child->cpuset->count);
2199 * cpuset_exit - detach cpuset from exiting task
2202 * Description: Detach cpuset from @tsk and release it.
2210 * Don't even think about dereferencing 'cs' after the cpuset use count
2212 * or callback_mutex. Otherwise a zero cpuset use count is a license to
2213 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
2221 * Set the exiting tasks cpuset to the root cpuset (top_cpuset).
2226 * If a task tries to allocate memory with an invalid cpuset,
2231 * the root cpuset (top_cpuset) for the remainder of its exit.
2235 * code we would add a second cpuset function call, to drop that
2239 * Normally, holding a reference to a cpuset without bumping its
2240 * count is unsafe. The cpuset could go away, or someone could
2241 * attach us to a different cpuset, decrementing the count on
2242 * the first cpuset that we never incremented. But in this case,
2247 * Another way to do this would be to set the cpuset pointer
2255 struct cpuset *cs;
2258 cs = tsk->cpuset;
2259 tsk->cpuset = &top_cpuset; /* the_top_cpuset_hack - see above */
2276 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
2277 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2279 * Description: Returns the cpumask_t cpus_allowed of the cpuset
2282 * tasks cpuset.
2291 guarantee_online_cpus(tsk->cpuset, &mask);
2304 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
2305 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2307 * Description: Returns the nodemask_t mems_allowed of the cpuset
2310 * tasks cpuset.
2319 guarantee_online_mems(tsk->cpuset, &mask);
2347 * ancestor to the specified cpuset. Call holding callback_mutex.
2349 * returns the root cpuset.
2351 static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
2367 * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
2375 * from an enclosing cpuset.
2387 * and do not allow allocations outside the current tasks cpuset
2390 * nearest enclosing mem_exclusive ancestor cpuset.
2397 * cpuset are short of memory, might require taking the callback_mutex
2402 * so no allocation on a node outside the cpuset is allowed (unless
2413 * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok
2425 const struct cpuset *cs; /* current cpuset ancestors */
2450 cs = nearest_exclusive_ancestor(current->cpuset);
2478 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2501 * cpuset_lock - lock out any changes to cpuset structures
2505 * task in an overlapping cpuset. Expose callback_mutex via this
2517 * cpuset_unlock - release lock on cpuset changes
2531 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2579 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
2587 cs1 = nearest_exclusive_ancestor(current->cpuset);
2595 cs2 = nearest_exclusive_ancestor(p->cpuset);
2606 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2612 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2615 * page reclaim efforts initiated by tasks in each cpuset.
2617 * This represents the rate at which some task in the cpuset
2623 * Display to user space in the per-cpuset read-only file
2626 * (direct) page reclaim by any task attached to the cpuset.
2631 struct cpuset *cs;
2634 cs = current->cpuset;
2641 * - Print tasks cpuset path into seq_file.
2642 * - Used for /proc/<pid>/cpuset.
2643 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2644 * doesn't really matter if tsk->cpuset changes after we read it,
2646 * anyway. No need to check that tsk->cpuset != NULL, thanks to
2648 * cpuset to top_cpuset.
2671 retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);