Searched refs:gfp_mask (Results 1 - 25 of 101) sorted by relevance

12345

/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/include/linux/
H A Dgfp.h110 * We get the zone list from the current node and the gfp_mask.
127 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, argument
137 return __alloc_pages(gfp_mask, order,
138 NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
142 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
145 alloc_pages(gfp_t gfp_mask, unsigned int order) argument
150 return alloc_pages_current(gfp_mask, order);
152 extern struct page *alloc_page_vma(gfp_t gfp_mask,
155 #define alloc_pages(gfp_mask, order) \
156 alloc_pages_node(numa_node_id(), gfp_mask, order)
[all...]
H A Dmempool.h11 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
31 extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
33 extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
40 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
53 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
54 void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data);
71 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
H A Dcpuset.h33 extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
34 extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
36 static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) argument
39 __cpuset_zone_allowed_softwall(z, gfp_mask);
42 static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) argument
45 __cpuset_zone_allowed_hardwall(z, gfp_mask);
106 static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) argument
111 static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) argument
H A Dradix-tree.h57 /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
60 gfp_t gfp_mask; member in struct:radix_tree_root
66 .gfp_mask = (mask), \
76 (root)->gfp_mask = (mask); \
158 int radix_tree_preload(gfp_t gfp_mask);
H A Dvmalloc.h45 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
46 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
66 gfp_t gfp_mask);
H A Dtextsearch.h163 gfp_t gfp_mask)
167 conf = kmalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
162 alloc_ts_config(size_t payload, gfp_t gfp_mask) argument
H A Dkfifo.h38 gfp_t gfp_mask, spinlock_t *lock);
39 extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
H A Dsuspend.h68 extern unsigned long get_safe_page(gfp_t gfp_mask);
H A Didr.h78 int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/mm/
H A Dmempool.c105 * @gfp_mask: the usual allocation bitmask.
115 int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) argument
137 new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
156 element = pool->alloc(gfp_mask, pool->pool_data);
196 * @gfp_mask: the usual allocation bitmask.
203 void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) argument
210 might_sleep_if(gfp_mask & __GFP_WAIT);
212 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
213 gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
214 gfp_mask |= __GFP_NOWARN; /* failures are OK */
280 mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) argument
298 mempool_kmalloc(gfp_t gfp_mask, void *pool_data) argument
305 mempool_kzalloc(gfp_t gfp_mask, void *pool_data) argument
322 mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) argument
[all...]
H A Dvmalloc.c27 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
165 int node, gfp_t gfp_mask)
187 area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
256 int node, gfp_t gfp_mask)
259 gfp_mask);
418 void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, argument
430 pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
434 (gfp_mask & GFP_LEVEL_MASK),
447 area->pages[i] = alloc_page(gfp_mask);
449 area->pages[i] = alloc_pages_node(node, gfp_mask,
163 __get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask) argument
255 get_vm_area_node(unsigned long size, unsigned long flags, int node, gfp_t gfp_mask) argument
466 __vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) argument
482 __vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, int node) argument
498 __vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) argument
[all...]
H A Dvmscan.c53 gfp_t gfp_mask; member in struct:scan_control
173 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, argument
188 unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
216 nr_before = (*shrinker->shrinker)(0, gfp_mask);
217 shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
498 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
499 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
569 if (!try_to_release_page(page, sc->gfp_mask))
951 throttle_vm_writeout(sc->gfp_mask);
1014 unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) argument
1613 __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) argument
1687 zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) argument
[all...]
H A Doom_kill.c175 static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask) argument
189 if (cpuset_zone_allowed_softwall(*z, gfp_mask))
397 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) argument
411 "gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
412 current->comm, gfp_mask, order, current->oomkilladj);
424 constraint = constrained_alloc(zonelist, gfp_mask);
H A Dswap_state.c73 gfp_t gfp_mask)
79 error = radix_tree_preload(gfp_mask);
146 int add_to_swap(struct page * page, gfp_t gfp_mask) argument
170 gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
72 __add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) argument
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/fs/ntfs/
H A Dmalloc.h32 * @gfp_mask: extra flags for the allocator
40 * Depending on @gfp_mask the allocation may be guaranteed to succeed.
42 static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) argument
47 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
48 /* return (void *)__get_free_page(gfp_mask); */
51 return __vmalloc(size, gfp_mask, PAGE_KERNEL);
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/fs/gfs2/
H A Dops_address.h20 extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/hw/amso1100/
H A Dc2_alloc.c40 static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, argument
48 &dma_addr, gfp_mask);
72 int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, argument
75 return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
91 dma_addr_t *dma_addr, gfp_t gfp_mask)
101 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
90 c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, dma_addr_t *dma_addr, gfp_t gfp_mask) argument
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/kernel/
H A Dkfifo.c32 * @gfp_mask: get_free_pages mask, passed to kmalloc()
39 gfp_t gfp_mask, spinlock_t *lock)
46 fifo = kmalloc(sizeof(struct kfifo), gfp_mask);
62 * @gfp_mask: get_free_pages mask, passed to kmalloc()
67 struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock) argument
81 buffer = kmalloc(size, gfp_mask);
85 ret = kfifo_init(buffer, size, gfp_mask, lock);
38 kfifo_init(unsigned char *buffer, unsigned int size, gfp_t gfp_mask, spinlock_t *lock) argument
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/core/
H A Dsa.h56 int timeout_ms, gfp_t gfp_mask,
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/net/ieee80211/softmac/
H A Dieee80211softmac_event.c88 int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask)
99 eventptr = kmalloc(sizeof(struct ieee80211softmac_event), gfp_mask);
119 int event, notify_function_ptr fun, void *context, gfp_t gfp_mask)
126 return ieee80211softmac_notify_internal(mac, event, NULL, fun, context, gfp_mask);
87 ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac, int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask) argument
118 ieee80211softmac_notify_gfp(struct net_device *dev, int event, notify_function_ptr fun, void *context, gfp_t gfp_mask) argument
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/fs/xfs/linux-2.6/
H A Dkmem.h123 kmem_shake_allow(gfp_t gfp_mask) argument
125 return (gfp_mask & __GFP_WAIT);
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/lib/
H A Dts_kmp.c89 gfp_t gfp_mask)
96 conf = alloc_ts_config(priv_size, gfp_mask);
88 kmp_init(const void *pattern, unsigned int len, gfp_t gfp_mask) argument
H A Dradix-tree.c83 return root->gfp_mask & __GFP_BITS_MASK;
94 gfp_t gfp_mask = root_gfp_mask(root); local
96 ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
97 if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
130 int radix_tree_preload(gfp_t gfp_mask) argument
140 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
175 root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
181 root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
186 root->gfp_mask &= __GFP_BITS_MASK;
191 return (__force unsigned)root->gfp_mask
[all...]
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/security/selinux/ss/
H A Dmls.h37 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/block/
H A Drd.c350 gfp_t gfp_mask; local
380 gfp_mask = mapping_gfp_mask(mapping);
381 gfp_mask &= ~(__GFP_FS|__GFP_IO);
382 gfp_mask |= __GFP_HIGH;
383 mapping_set_gfp_mask(mapping, gfp_mask);

Completed in 196 milliseconds

12345