#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/topology.h>
#include <linux/mmdebug.h>

struct vm_area_struct;

/*
 * GFP bitmasks.
 *
 * Zone modifiers (see linux/mmzone.h - low four bits)
 *
 * Do not put any conditional on these. If necessary, modify the definitions
 * without the underscores and use them consistently. The definitions here may
 * be used in bit comparisons.
 */
#define __GFP_DMA	((__force gfp_t)0x01u)
#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
#define __GFP_DMA32	((__force gfp_t)0x04u)
#define __GFP_MOVABLE	((__force gfp_t)0x08u)  /* Page is movable */
#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
/*
 * Action modifiers - do not change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.  This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed.
 */
#define __GFP_WAIT	((__force gfp_t)0x10u)	/* Can wait and reschedule? */
#define __GFP_HIGH	((__force gfp_t)0x20u)	/* Should access emergency pools? */
#define __GFP_IO	((__force gfp_t)0x40u)	/* Can start physical IO? */
#define __GFP_FS	((__force gfp_t)0x80u)	/* Can call down to low-level FS? */
#define __GFP_COLD	((__force gfp_t)0x100u)	/* Cache-cold page required */
#define __GFP_NOWARN	((__force gfp_t)0x200u)	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	((__force gfp_t)0x400u)	/* See above */
#define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* See above */
#define __GFP_NORETRY	((__force gfp_t)0x1000u)/* See above */
#define __GFP_COMP	((__force gfp_t)0x4000u)/* Add compound page metadata */
#define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE	((__force gfp_t)0x40000u)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */

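/*
 * Illustrative sketch (not part of the original header): action modifiers
 * are OR-ed onto a base GFP class at the call site. The function below is
 * hypothetical; __get_free_pages() is declared later in this header.
 */
#if 0
static unsigned long example_scratch_page(void)
{
	/* Zeroed page; failure warning suppressed since a fallback exists. */
	return __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN, 0);
}
#endif
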
#ifdef CONFIG_KMEMCHECK
#define __GFP_NOTRACK	((__force gfp_t)0x200000u)  /* Don't track with kmemcheck */
#else
#define __GFP_NOTRACK	((__force gfp_t)0)
#endif

/*
 * This may seem redundant, but it's a way of annotating false positives vs.
 * allocations that simply cannot be supported (e.g. page tables).
 */
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)

#define __GFP_BITS_SHIFT 22	/* Room for 22 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* GFP_NOWAIT equals 0, but is built from constants in case they ever change */
#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_HARDWALL | __GFP_HIGHMEM | \
				 __GFP_MOVABLE)
#define GFP_IOFS	(__GFP_IO | __GFP_FS)

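/*
 * Illustrative sketch (not part of the original header): choosing a GFP
 * class by context. GFP_ATOMIC never sleeps and may dip into emergency
 * pools; GFP_KERNEL may block on reclaim, I/O and filesystem activity.
 * The function names are hypothetical.
 */
#if 0
static void example_irq_path(void)
{
	struct page *p = alloc_page(GFP_ATOMIC);	/* no sleeping allowed */

	if (p)
		__free_page(p);
}

static void example_process_path(void)
{
	struct page *p = alloc_page(GFP_KERNEL);	/* may sleep */

	if (p)
		__free_page(p);
}
#endif
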
#ifdef CONFIG_NUMA
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE	((__force gfp_t)0)
#endif

/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)

/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)

/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))

/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32	__GFP_DMA32

/* Convert GFP flags to their corresponding migrate type */
static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
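
/*
 * Worked mapping for the return above (added for clarity), assuming the
 * migrate type order from linux/mmzone.h (MIGRATE_UNMOVABLE = 0,
 * MIGRATE_RECLAIMABLE = 1, MIGRATE_MOVABLE = 2):
 *
 *   neither flag set           -> 0 (MIGRATE_UNMOVABLE)
 *   __GFP_RECLAIMABLE only     -> 1 (MIGRATE_RECLAIMABLE)
 *   __GFP_MOVABLE only         -> 2 (MIGRATE_MOVABLE)
 *   both set                   -> 3 (rejected by the WARN_ON above)
 */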

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But __GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 *       bit       result
 *       =================
 *       0x0    => NORMAL
 *       0x1    => DMA or NORMAL
 *       0x2    => HIGHMEM or NORMAL
 *       0x3    => BAD (DMA+HIGHMEM)
 *       0x4    => DMA32 or DMA or NORMAL
 *       0x5    => BAD (DMA+DMA32)
 *       0x6    => BAD (HIGHMEM+DMA32)
 *       0x7    => BAD (HIGHMEM+DMA32+DMA)
 *       0x8    => NORMAL (MOVABLE+0)
 *       0x9    => DMA or NORMAL (MOVABLE+DMA)
 *       0xa    => MOVABLE (Movable is valid only if HIGHMEM is set too)
 *       0xb    => BAD (MOVABLE+HIGHMEM+DMA)
 *       0xc    => DMA32 (MOVABLE+DMA32)
 *       0xd    => BAD (MOVABLE+DMA32+DMA)
 *       0xe    => BAD (MOVABLE+DMA32+HIGHMEM)
 *       0xf    => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * ZONES_SHIFT)				\
	| (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT)			\
	| (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT)		\
	| (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT)			\
	| (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT)			\
	| (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT)	\
	| (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
)
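
/*
 * Worked lookup (added for clarity), assuming ZONES_SHIFT == 2: for a mask
 * whose low bits are 0x2 (__GFP_HIGHMEM), the table entry sits at bit
 * position 0x2 * 2 = 4, so
 *
 *	(GFP_ZONE_TABLE >> 4) & 0x3 == OPT_ZONE_HIGHMEM
 *
 * i.e. ZONE_HIGHMEM on CONFIG_HIGHMEM kernels and ZONE_NORMAL otherwise.
 * gfp_zone() below performs exactly this shift-and-mask.
 */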

/*
 * GFP_ZONE_BAD is a bitmap of all combinations of __GFP_DMA, __GFP_DMA32,
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One bit per
 * combination, starting with bit 0; the bit is set if the combination is
 * not allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (__GFP_DMA | __GFP_HIGHMEM)				\
	| 1 << (__GFP_DMA | __GFP_DMA32)				\
	| 1 << (__GFP_DMA32 | __GFP_HIGHMEM)				\
	| 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)		\
	| 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA)		\
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA)		\
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM)		\
	| 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = flags & GFP_ZONEMASK;

	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);

	if (__builtin_constant_p(bit))
		MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	else {
#ifdef CONFIG_DEBUG_VM
		BUG_ON((GFP_ZONE_BAD >> bit) & 1);
#endif
	}
	return z;
}
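
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical) of what gfp_zone() resolves for two common classes.
 */
#if 0
static void example_gfp_zone(void)
{
	/* No zone bits set: low bits 0x0 -> ZONE_NORMAL. */
	enum zone_type a = gfp_zone(GFP_KERNEL);

	/* __GFP_HIGHMEM | __GFP_MOVABLE: low bits 0xa -> ZONE_MOVABLE. */
	enum zone_type b = gfp_zone(GFP_HIGHUSER_MOVABLE);
}
#endif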

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
	if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
		return 1;

	return 0;
}
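
/*
 * Note (added for clarity): index 0 selects the node's general fallback
 * zonelist; index 1 exists only on NUMA builds and, when __GFP_THISNODE
 * is set, restricts the search to the node's own zones instead of
 * spilling over to other nodes.
 */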

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
		       struct zonelist *zonelist, nodemask_t *nodemask);

static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	/* Unknown node is current node */
	if (nid < 0)
		nid = numa_node_id();

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
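
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical) contrasting the two node-aware helpers above.
 */
#if 0
static struct page *example_node_alloc(int nid)
{
	/* Tolerates nid == -1 by falling back to the current node. */
	struct page *p = alloc_pages_node(nid, GFP_KERNEL, 0);

	/* Requires a valid node id; checked by VM_BUG_ON(). */
	struct page *q = alloc_pages_exact_node(0, GFP_KERNEL, 0);

	if (q)
		__free_page(q);
	return p;
}
#endif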

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
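
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical) pairing the two allocator namespaces described above.
 */
#if 0
static void example_two_namespaces(void)
{
	/* struct page namespace: may hand back a highmem page. */
	struct page *page = alloc_pages(GFP_HIGHUSER, 0);

	/* Kernel virtual address namespace: two contiguous lowmem pages. */
	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);

	if (page)
		__free_pages(page, 0);
	if (addr)
		free_pages(addr, 1);
}
#endif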

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
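
/*
 * Illustrative sketch (not part of the original header; the function name
 * and size are arbitrary) of matched allocate/free pairs, including the
 * exact-size helpers that release unused tail pages of the allocation.
 */
#if 0
static void example_alloc_free_pairs(void)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	void *buf = alloc_pages_exact(12000, GFP_KERNEL);

	if (buf)
		free_pages_exact(buf, 12000);
	if (page)
		free_page(page);
}
#endif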

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);

extern gfp_t gfp_allowed_mask;

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

#endif /* __LINUX_GFP_H */