Searched refs:cpuset (Results 1 - 22 of 22) sorted by relevance
/netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/kernel/ |
H A D | cpuset.c | 2 * kernel/cpuset.c 23 #include <linux/cpuset.h> 60 * When there is only one cpuset (the root cpuset) we can 74 struct cpuset { struct 76 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ 82 atomic_t count; /* count tasks using this cpuset */ 91 struct cpuset *parent; /* my parent */ 92 struct dentry *dentry; /* cpuset fs entry */ 96 * recent time this cpuset change [all...] |
H A D | Makefile | 38 obj-$(CONFIG_CPUSETS) += cpuset.o
|
H A D | exit.c | 34 #include <linux/cpuset.h>
|
H A D | fork.c | 32 #include <linux/cpuset.h>
|
H A D | sched.c | 46 #include <linux/cpuset.h>
|
/netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/arch/i386/mach-voyager/ |
H A D | voyager_smp.c | 81 static void send_CPI(__u32 cpuset, __u8 cpi); 112 send_QIC_CPI(__u32 cpuset, __u8 cpi) argument 117 if(cpuset & (1<<cpu)) { 1340 /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per 1343 send_CPI(__u32 cpuset, __u8 cpi) argument 1346 __u32 quad_cpuset = (cpuset & voyager_quad_processors); 1351 outb((__u8)(cpuset), VIC_CPI_Registers[cpi]); 1356 cpuset &= ~quad_cpuset; 1357 cpuset &= 0xff; /* only first 8 CPUs valid for VIC CPI */ 1358 if(cpuset [all...] |
/netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/mm/ |
H A D | pdflush.c | 23 #include <linux/cpuset.h> 183 * Some configs put our parent kthread in a limited cpuset,
|
H A D | memory_hotplug.c | 25 #include <linux/cpuset.h>
|
H A D | oom_kill.c | 24 #include <linux/cpuset.h> 436 "No available memory in cpuset");
|
H A D | migrate.c | 26 #include <linux/cpuset.h>
|
H A D | hugetlb.c | 15 #include <linux/cpuset.h> 835 * When cpuset is configured, it breaks the strict hugetlb page 837 * reservation is completely rubbish in the presence of cpuset because 839 * current cpuset. Application can still potentially OOM'ed by kernel 840 * with lack of free htlb page in cpuset that the task is in. 841 * Attempt to enforce strict accounting with cpuset is almost 842 * impossible (or too ugly) because cpuset is too fluid that 845 * The change of semantics for shared hugetlb mapping with cpuset is 849 * semantics that cpuset has.
|
H A D | vmscan.c | 34 #include <linux/cpuset.h>
|
H A D | filemap.c | 32 #include <linux/cpuset.h>
|
H A D | mempolicy.c | 24 #include <linux/cpuset.h> 831 /* Restrict the nodes to the allowed nodes in the cpuset */ 1253 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it 1256 * keeps mempolicies cpuset relative after its cpuset moves. See 1257 * further kernel/cpuset.c update_nodemask().
|
H A D | page_alloc.c | 34 #include <linux/cpuset.h> 894 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 1016 * skip over zones that are not allowed by the cpuset, or that have 1152 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1278 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1279 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1843 /* cpuset refresh routine should be here */
|
H A D | slub.c | 19 #include <linux/cpuset.h>
|
H A D | slab.c | 97 #include <linux/cpuset.h> 3238 * of the current cpuset / memory policy requirements.
|
/netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/include/linux/ |
H A D | mempolicy.h | 155 (cpuset_being_rebound == current->cpuset)
|
H A D | sched.h | 752 struct cpuset; 1040 struct cpuset *cpuset; member in struct:task_struct 1168 #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ 1169 #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ 1483 * pins the final release of task.io_context. Also protects ->cpuset.
|
/netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/fs/proc/ |
H A D | array.c | 75 #include <linux/cpuset.h>
|
H A D | base.c | 71 #include <linux/cpuset.h> 1996 REG("cpuset", S_IRUGO, cpuset), 2279 REG("cpuset", S_IRUGO, cpuset),
|
/netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/init/ |
H A D | main.c | 41 #include <linux/cpuset.h>
|
Completed in 272 milliseconds