
Lines Matching refs:cpuset in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/cpuset.c

2  *  kernel/cpuset.c
27 #include <linux/cpuset.h>
64 * Workqueue for cpuset related tasks.
67 * is set. So we create a separate workqueue thread for cpuset.
73 * When there is only one cpuset (the root cpuset) we can
80 struct cpuset;
91 struct cpuset {
95 cpumask_var_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
98 struct cpuset *parent; /* my parent */
108 /* used for walking a cpuset hierarchy */
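For orientation: the fields matched above (lines 91-108) live in a structure that embeds a cgroup_subsys_state, which is what lets the container_of() accessors below work. A minimal sketch of the 2.6.36 layout, with the remaining fields elided:

    struct cpuset {
            struct cgroup_subsys_state css;

            unsigned long flags;            /* "unsigned long" so bitops work */
            cpumask_var_t cpus_allowed;     /* CPUs allowed to tasks in cpuset */
            nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */

            struct cpuset *parent;          /* my parent */

            /* ... fmeter, partition number, relax_domain_level elided ... */

            struct list_head stack_list;    /* used for walking a cpuset hierarchy */
    };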
112 /* Retrieve the cpuset for a cgroup */
113 static inline struct cpuset *cgroup_cs(struct cgroup *cont)
116 struct cpuset, css);
119 /* Retrieve the cpuset for a task */
120 static inline struct cpuset *task_cs(struct task_struct *task)
123 struct cpuset, css);
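Both accessors (lines 113 and 120) are thin container_of() wrappers over the cgroup subsystem state; their bodies, elided by the match listing, read in this kernel:

    static inline struct cpuset *cgroup_cs(struct cgroup *cont)
    {
            return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
                                struct cpuset, css);
    }

    static inline struct cpuset *task_cs(struct task_struct *task)
    {
            return container_of(task_subsys_state(task, cpuset_subsys_id),
                                struct cpuset, css);
    }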
126 /* bits in struct cpuset flags field */
138 static inline int is_cpu_exclusive(const struct cpuset *cs)
143 static inline int is_mem_exclusive(const struct cpuset *cs)
148 static inline int is_mem_hardwall(const struct cpuset *cs)
153 static inline int is_sched_load_balance(const struct cpuset *cs)
158 static inline int is_memory_migrate(const struct cpuset *cs)
163 static inline int is_spread_page(const struct cpuset *cs)
168 static inline int is_spread_slab(const struct cpuset *cs)
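Each is_*() predicate (lines 138-168) is a one-line test_bit() on the flags word, using bit numbers from the cpuset_flagbits_t enum defined just above them; for example:

    static inline int is_cpu_exclusive(const struct cpuset *cs)
    {
            return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
    }

The others differ only in the CS_* bit tested.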
173 static struct cpuset top_cpuset = {
178 * There are two global mutexes guarding cpuset structures. The first
180 * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific
183 * task_lock() when dereferencing a task's cpuset pointer. See "The
190 * the cpuset structure first, knowing nothing will change. It can
198 * from one of the callbacks into the cpuset code from within
212 * Accessing a task's cpuset should be done in accordance with the
230 * This is ugly, but preserves the userspace API for existing cpuset
231 * users. If someone tries to mount the "cpuset" filesystem, we
242 "cpuset,noprefix,"
252 .name = "cpuset",
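Lines 230-252 are the compatibility shim: mounting filesystem type "cpuset" is translated into a cgroup mount with the cpuset subsystem and noprefix enabled, so pre-cgroup scripts keep working. A hypothetical userspace illustration (the /dev/cpuset mount point is conventional, not mandated; run as root with the directory already created):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Legacy API, handled by the shim above ... */
            if (mount("cpuset", "/dev/cpuset", "cpuset", 0, NULL) != 0)
                    perror("mount");

            /*
             * ... which the kernel internally turns into roughly:
             *
             *   mount("cpuset", "/dev/cpuset", "cgroup", 0,
             *         "cpuset,noprefix,release_agent=/sbin/cpuset_release_agent");
             */
            return 0;
    }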
258 * are online. If none are online, walk up the cpuset hierarchy
270 static void guarantee_online_cpus(const struct cpuset *cs,
285 * up the cpuset hierarchy until we find one that does have some
295 static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
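Both guarantee_online_* helpers share the walk-up pattern described at lines 258-285: climb parents until an ancestor has an online CPU (or memory node), falling back to all online CPUs at the root. The cpus variant reads, roughly:

    static void guarantee_online_cpus(const struct cpuset *cs,
                                      struct cpumask *pmask)
    {
            /* Walk up until some ancestor intersects the online mask */
            while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
                    cs = cs->parent;
            if (cs)
                    cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
            else
                    cpumask_copy(pmask, cpu_online_mask);
    }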
309 * update task's spread flag if cpuset's page/slab spread flag is set
313 static void cpuset_update_task_spread_flag(struct cpuset *cs,
327 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
329 * One cpuset is a subset of another if all its allowed CPUs and
334 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
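The subset test (line 334) combines mask subsetting with a monotonicity condition on the exclusive flags; in this kernel:

    static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
    {
            return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
                    nodes_subset(p->mems_allowed, q->mems_allowed) &&
                    is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
                    is_mem_exclusive(p) <= is_mem_exclusive(q);
    }

The <= comparisons mean p may claim exclusivity only if q already has it.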
343 * alloc_trial_cpuset - allocate a trial cpuset
344 * @cs: the cpuset that the trial cpuset duplicates
346 static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
348 struct cpuset *trial;
364 * free_trial_cpuset - free the trial cpuset
365 * @trial: the trial cpuset to be freed
367 static void free_trial_cpuset(struct cpuset *trial)
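The trial-cpuset pair (lines 343-367) duplicates a cpuset so a proposed change can be validated before it is committed; their elided bodies are approximately:

    static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
    {
            struct cpuset *trial;

            trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
            if (!trial)
                    return NULL;

            if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
                    kfree(trial);
                    return NULL;
            }
            cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);

            return trial;
    }

    static void free_trial_cpuset(struct cpuset *trial)
    {
            free_cpumask_var(trial->cpus_allowed);
            kfree(trial);
    }

Note the separate alloc_cpumask_var(): with CONFIG_CPUMASK_OFFSTACK, cpus_allowed is a pointer, so kmemdup() alone would alias the original mask.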
374 * validate_change() - Used to validate that any proposed cpuset change
377 * If we replaced the flag and mask values of the current cpuset
378 * (cur) with those values in the trial cpuset (trial), would
382 * 'cur' is the address of an actual, in-use cpuset. Operations
384 * cpuset in the list must use cur below, not trial.
393 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
396 struct cpuset *c, *par;
404 /* Remaining checks don't apply to root cpuset */
410 /* We must be a subset of our parent cpuset */
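Condensed, validate_change() (line 393) enforces three invariants: existing children must remain subsets of the trial value, the trial must remain a subset of its parent, and exclusive siblings must not overlap. A sketch, with the check that a cpuset with attached tasks keeps non-empty cpus and mems omitted:

    static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
    {
            struct cgroup *cont;
            struct cpuset *c, *par;

            /* Each of our child cpusets must be a subset of us */
            list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
                    if (!is_cpuset_subset(cgroup_cs(cont), trial))
                            return -EBUSY;
            }

            /* Remaining checks don't apply to root cpuset */
            if (cur == &top_cpuset)
                    return 0;

            par = cur->parent;

            /* We must be a subset of our parent cpuset */
            if (!is_cpuset_subset(trial, par))
                    return -EACCES;

            /* If either I or some sibling (!= me) is exclusive, we can't overlap */
            list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
                    c = cgroup_cs(cont);
                    if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                        c != cur &&
                        cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
                            return -EINVAL;
                    if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
                        c != cur &&
                        nodes_intersects(trial->mems_allowed, c->mems_allowed))
                            return -EINVAL;
            }

            return 0;
    }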
446 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
452 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
460 update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
466 struct cpuset *cp;
468 struct cpuset *child;
470 cp = list_first_entry(&q, struct cpuset, stack_list);
508 * q - a linked-list queue of cpuset pointers, used to implement a
510 * to each cpuset marked is_sched_load_balance into the
517 * cpus_allowed of every cpuset marked is_sched_load_balance
528 * load balanced cpusets (using the array of cpuset pointers in
544 struct cpuset *cp; /* scans q */
545 struct cpuset **csa; /* array of all cpuset ptrs */
546 int csn; /* how many cpuset ptrs in csa so far */
582 struct cpuset *child; /* scans child cpusets of cp */
584 cp = list_first_entry(&q, struct cpuset, stack_list);
614 struct cpuset *a = csa[i];
618 struct cpuset *b = csa[j];
623 struct cpuset *c = csa[k];
649 struct cpuset *a = csa[i];
677 struct cpuset *b = csa[j];
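The csa[i]/csa[j]/csa[k] triple matched at lines 614-677 is the core of generate_sched_domains(): every load-balanced cpuset starts in its own partition (its pn field), and any two partitions containing overlapping cpusets are merged until a fixed point, leaving one sched domain per surviving partition. The merge loop, excerpted:

    for (i = 0; i < csn; i++)
            csa[i]->pn = i;
    ndoms = csn;

    restart:
            /* Find the best partition (set of sched domains) */
            for (i = 0; i < csn; i++) {
                    struct cpuset *a = csa[i];
                    int apn = a->pn;

                    for (j = 0; j < csn; j++) {
                            struct cpuset *b = csa[j];
                            int bpn = b->pn;

                            if (apn != bpn && cpusets_overlap(a, b)) {
                                    /* Fold partition bpn into apn */
                                    for (k = 0; k < csn; k++) {
                                            struct cpuset *c = csa[k];

                                            if (c->pn == bpn)
                                                    c->pn = apn;
                                    }
                                    ndoms--;        /* one less element */
                                    goto restart;
                            }
                    }
            }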
713 * Cannot be directly called from cpuset code handling changes
714 * to the cpuset pseudo-filesystem, because it cannot be called
753 * If the flag 'sched_load_balance' of any cpuset with non-empty
754 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
755 * which has that flag enabled, or if any cpuset with a non-empty
761 * but such cpuset changes as these must nest that locking the
764 * So in order to avoid an ABBA deadlock, the cpuset code handling
789 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
796 * words, if its mask is not equal to its cpuset's mask).
806 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
813 * We don't need to re-check for the cgroup/cpuset membership, since we're
823 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
824 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
835 static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
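The scan machinery behind update_tasks_cpumask() (lines 789-835) is a cgroup_scanner with two small callbacks: a filter that skips tasks whose mask already matches, and an updater that applies the cpuset's mask. In this kernel, roughly:

    static int cpuset_test_cpumask(struct task_struct *tsk,
                                   struct cgroup_scanner *scan)
    {
            return !cpumask_equal(&tsk->cpus_allowed,
                                  (cgroup_cs(scan->cg))->cpus_allowed);
    }

    static void cpuset_change_cpumask(struct task_struct *tsk,
                                      struct cgroup_scanner *scan)
    {
            set_cpus_allowed_ptr(tsk, (cgroup_cs(scan->cg))->cpus_allowed);
    }

    static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
    {
            struct cgroup_scanner scan;

            scan.cg = cs->css.cgroup;
            scan.test_task = cpuset_test_cpumask;
            scan.process_task = cpuset_change_cpumask;
            scan.heap = heap;
            cgroup_scan_tasks(&scan);
    }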
847 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
848 * @cs: the cpuset to consider
849 * @buf: buffer of cpu numbers written to this cpuset
851 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
863 * An empty cpus_allowed is ok only if the cpuset has no tasks.
897 * Scan tasks in the cpuset, and update the cpumasks of any
917 * Call holding cgroup_mutex, so current's cpuset won't change
921 * our task's cpuset.
973 * old allowed nodes, and if it allocates a page when the cpuset clears newly
1009 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
1016 struct cpuset *cs;
1046 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1047 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1048 * @oldmem: old mems_allowed of cpuset cs
1055 static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1080 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1086 * of a cpuset. Needs to validate the request, update the
1087 * cpuset's mems_allowed, and for each task in the cpuset,
1089 * mempolicies and if the cpuset is marked 'memory_migrate',
1093 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1097 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1117 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1165 static int update_relax_domain_level(struct cpuset *cs, s64 val)
1183 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1189 * We don't need to re-check for the cgroup/cpuset membership, since we're
1199 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1200 * @cs: the cpuset in which each task's spread flags need to be changed
1211 static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1225 * cs: the cpuset to update
1231 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1234 struct cpuset *trialcs;
1379 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1384 struct cpuset *cs = cgroup_cs(cont);
1390 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
1420 struct cpuset *cs)
1440 struct cpuset *cs = cgroup_cs(cont);
1441 struct cpuset *oldcs = cgroup_cs(oldcont);
1482 /* The various types of files and directories in a cpuset file system */
1502 struct cpuset *cs = cgroup_cs(cgrp);
1547 struct cpuset *cs = cgroup_cs(cgrp);
1572 struct cpuset *cs = cgroup_cs(cgrp);
1573 struct cpuset *trialcs;
1611 static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1622 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
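Both formatters (lines 1611-1622) take callback_mutex so they see a consistent mask, then render it with the cpulist/nodelist helpers; roughly:

    static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
    {
            int ret;

            mutex_lock(&callback_mutex);
            ret = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
            mutex_unlock(&callback_mutex);

            return ret;
    }

    static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
    {
            nodemask_t mask;

            mutex_lock(&callback_mutex);
            mask = cs->mems_allowed;
            mutex_unlock(&callback_mutex);

            return nodelist_scnprintf(page, PAGE_SIZE, mask);
    }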
1647 struct cpuset *cs = cgroup_cs(cont);
1679 struct cpuset *cs = cgroup_cs(cont);
1710 struct cpuset *cs = cgroup_cs(cont);
1824 /* memory_pressure_enabled is in root cpuset only */
1852 struct cpuset *cs, *parent_cs;
1869 * cpuset_create - create a cpuset
1870 * ss: cpuset cgroup subsystem
1871 * cont: control group that the new cpuset will be part of
1878 struct cpuset *cs;
1879 struct cpuset *parent;
1910 * If the cpuset being removed has its flag 'sched_load_balance'
1917 struct cpuset *cs = cgroup_cs(cont);
1928 .name = "cpuset",
1942 * Description: Initialize top_cpuset and the cpuset internal file system,
1971 * cpuset_do_move_task - move a given task to another cpuset
1987 * move_member_tasks_to_cpuset - move tasks from one cpuset to another
1988 * @from: cpuset in which the tasks currently reside
1989 * @to: cpuset to which the tasks will be moved
1997 static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
2014 * or memory nodes, we need to walk over the cpuset hierarchy,
2016 * last CPU or node from a cpuset, then move the tasks in the empty
2017 * cpuset to its next-highest non-empty parent.
2022 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2024 struct cpuset *parent;
2028 * in the cpuset; the list is empty if there are none;
2035 * Find its next-highest non-empty parent, (top cpuset
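Putting the fragments at lines 2022-2035 together: when hotplug empties a cpuset, the kernel walks up to the nearest ancestor that still has both CPUs and memory, then reparents the tasks. In this kernel:

    static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
    {
            struct cpuset *parent;

            /*
             * Find its next-highest non-empty parent (top cpuset
             * has online cpus, so can't be empty).
             */
            parent = cs->parent;
            while (cpumask_empty(parent->cpus_allowed) ||
                   nodes_empty(parent->mems_allowed))
                    parent = parent->parent;

            move_member_tasks_to_cpuset(cs, parent);
    }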
2047 * Walk the specified cpuset subtree and look for empty cpusets.
2048 * The tasks of such cpuset must be moved to a parent cpuset.
2057 * For now, since we lack memory hot unplug, we'll never see a cpuset
2059 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
2061 static void scan_for_empty_cpusets(struct cpuset *root)
2064 struct cpuset *cp; /* scans cpusets being updated */
2065 struct cpuset *child; /* scans child cpusets of cp */
2075 cp = list_first_entry(&queue, struct cpuset, stack_list);
2089 /* Remove offline cpus and mems from this cpuset. */
2097 /* Move tasks from the empty cpuset to a parent */
2182 * Description: Finish top cpuset after cpu, node maps are initialized
2192 cpuset_wq = create_singlethread_workqueue("cpuset");
2197 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2198 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2201 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2204 * task's cpuset.
2218 const struct cpuset *cs;
2264 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2265 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2267 * Description: Returns the nodemask_t mems_allowed of the cpuset
2270 * task's cpuset.
2299 * mem_hardwall ancestor to the specified cpuset. Call holding
2301 * (an unusual configuration), then returns the root cpuset.
2303 static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2318 * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been
2325 * might sleep, and might allow a node from an enclosing cpuset.
2337 * and do not allow allocations outside the current task's cpuset
2340 * nearest enclosing hardwalled ancestor cpuset.
2347 * cpuset are short of memory, might require taking the callback_mutex
2352 * so no allocation on a node outside the cpuset is allowed (unless
2363 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
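The GFP rules sketched at lines 2337-2363 become an early-exit ladder in __cpuset_node_allowed_softwall(): interrupt context and __GFP_THISNODE are always allowed, the task's own mems_allowed is checked next, __GFP_HARDWALL requests stop there, and only GFP_KERNEL-style requests fall through to the nearest hardwalled ancestor. Condensed:

    int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
    {
            const struct cpuset *cs;        /* current cpuset ancestors */
            int allowed;

            if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
                    return 1;
            if (node_isset(node, current->mems_allowed))
                    return 1;
            if (unlikely(test_thread_flag(TIF_MEMDIE)))
                    return 1;       /* OOM-killed tasks may allocate anywhere */
            if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
                    return 0;
            if (current->flags & PF_EXITING) /* Let dying task have memory */
                    return 1;

            /* Not hardwall and node somewhere in current's mems_allowed */
            mutex_lock(&callback_mutex);

            task_lock(current);
            cs = nearest_hardwall_ancestor(task_cs(current));
            allowed = node_isset(node, cs->mems_allowed);
            task_unlock(current);

            mutex_unlock(&callback_mutex);
            return allowed;
    }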
2373 const struct cpuset *cs; /* current cpuset ancestors */
2425 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2444 * cpuset_unlock - release lock on cpuset changes
2459 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2522 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2525 * Description: Prints @task's name, cpuset name, and cached copy of its
2539 printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2547 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2553 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2556 * page reclaim efforts initiated by tasks in each cpuset.
2558 * This represents the rate at which some task in the cpuset
2564 * Display to user space in the per-cpuset read-only file
2567 * (direct) page reclaim by any task attached to the cpuset.
2580 * - Print task's cpuset path into seq_file.
2581 * - Used for /proc/<pid>/cpuset.
2582 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2583 * doesn't really matter if tsk->cpuset changes after we read it,
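A hypothetical userspace counterpart to these last matches: reading /proc/self/cpuset to see which cpuset the calling task is attached to.

    #include <stdio.h>

    int main(void)
    {
            char path[256];
            FILE *f = fopen("/proc/self/cpuset", "r");

            if (f && fgets(path, sizeof(path), f))
                    printf("cpuset: %s", path);     /* "/" for the root cpuset */
            if (f)
                    fclose(f);
            return 0;
    }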