#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in the system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

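/*
 * The inline wrappers below short-circuit the common case: with at
 * most one cpuset in the system every node is allowed, so the
 * out-of-line check can be skipped entirely.
 */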
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
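
/*
 * Example (illustrative sketch, not part of this header): a typical
 * caller is an allocator path that skips zones outside the current
 * task's cpuset before trying to take pages from them:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 *
 * The hardwall variant applies the stricter containment check.
 */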

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

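/*
 * cpuset_memory_pressure_bump() keeps the common case cheap: the
 * out-of-line bump is only taken when the global
 * cpuset_memory_pressure_enabled flag has been set.
 */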
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
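
/*
 * Example (illustrative sketch, not part of this header): page cache
 * allocation can honour the spread policy by picking the round-robin
 * spread node instead of the local node, along the lines of:
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return alloc_pages_exact_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */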

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * Reading the current task's mems_allowed and mempolicy in the fast
 * path must be protected by get_mems_allowed().
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that reading mems_allowed and mempolicy happens after
	 * the update of ->mems_allowed_change_disable.
	 *
	 * A write-side task that finds ->mems_allowed_change_disable
	 * non-zero knows a read-side task is reading mems_allowed or
	 * mempolicy, and clears the old bits lazily.
	 */
	smp_mb();
}

static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that the reads of mems_allowed and mempolicy complete
	 * before mems_allowed_change_disable is decremented.
	 *
	 * A write-side task that sees a non-zero count knows a
	 * read-side task is still reading mems_allowed or mempolicy,
	 * and must not clear the old bits in the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}
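
/*
 * Example (illustrative sketch, not part of this header): a fast-path
 * reader brackets its use of mems_allowed with the pair above so a
 * concurrent cpuset rebind will not clear bits out from under it:
 *
 *	get_mems_allowed();
 *	if (node_isset(nid, cpuset_current_mems_allowed))
 *		page = alloc_pages_exact_node(nid, gfp_mask, order);
 *	put_mems_allowed();
 */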

static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	current->mems_allowed = nodemask;
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

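/*
 * With CONFIG_CPUSETS disabled, the interfaces above degenerate to
 * "everything allowed" stubs and no-ops, so callers need no #ifdefs.
 */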
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline void get_mems_allowed(void)
{
}

static inline void put_mems_allowed(void)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */