Searched refs:zonelist (Results 1 - 15 of 15) sorted by last modified time

/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/parisc/mm/
init.c 601 struct zonelist *zl;
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/include/linux/
swap.h 162 extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
vmstat.h 176 extern void zone_statistics(struct zonelist *, struct zone *);
cpuset.h 31 int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
101 static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
gfp.h 125 FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
mempolicy.h 56 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
60 * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
66 struct zonelist *zonelist; /* bind */ member in union:mempolicy::__anon10016
161 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
258 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
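
The mempolicy.h comments above (lines 56-66) say that an MPOL_BIND policy owns a separately kmalloc'ed zonelist inside its union, so any copy of the policy must deep-copy that zonelist; the comment assigns that job to mpol_clone(). Below is a minimal sketch of that duplication step, reconstructed rather than quoted from this tree: mpol_clone_sketch is a hypothetical name, and policy_cache, kmemdup() and ksize() are standard 2.6 facilities assumed to be available, as they would be inside mm/mempolicy.c.

    /* Sketch only: deep-copying the MPOL_BIND zonelist when a policy
     * is cloned. Reconstructed; not the code in this tree. */
    static struct mempolicy *mpol_clone_sketch(struct mempolicy *old)
    {
        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

        if (!new)
            return ERR_PTR(-ENOMEM);
        *new = *old;                       /* shallow copy first */
        atomic_set(&new->refcnt, 1);
        if (new->policy == MPOL_BIND) {
            /* v.zonelist is kmalloc'ed (mempolicy.h 56 above), so a
             * shallow copy would double-kfree it; duplicate the
             * variable-length zone array instead. */
            new->v.zonelist = kmemdup(old->v.zonelist,
                                      ksize(old->v.zonelist), GFP_KERNEL);
            if (!new->v.zonelist) {
                kmem_cache_free(policy_cache, new);
                return ERR_PTR(-ENOMEM);
            }
        }
        return new;
    }
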
mmzone.h 314 /* Maximum number of zones on a zonelist */
319 * We cache key information from each zonelist for smaller cache
322 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
325 * 2) The array z_to_n[] maps each zone in the zonelist to its node
329 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
330 * indexed by a zones offset in the zonelist zones[] array.
347 * zonelist. However, the mempolicy zonelists constructed for
355 * at the front of the zonelist struct, ending in a variable length
358 * Then we put the optional zonelist cache on the end of the zonelist
396 struct zonelist { struct
[all...]
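
Pulling the mmzone.h comments above together: a zonelist is a NULL-delimited array of zone pointers, optionally followed by a zonelist cache holding the fullzones bitmap and the z_to_n[] zone-to-node map, both indexed by a zone's offset in zones[]; zlcache_ptr stays NULL for the variable-length zonelists that mempolicy builds. The sketch below is reconstructed from those comments and a generic 2.6.2x tree, so field names may differ slightly here.

    /* Reconstructed from the comments above; not quoted from this tree. */
    #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

    struct zonelist_cache {
        unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];     /* zone index -> node id */
        DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone came up full?    */
        unsigned long last_full_zap;  /* jiffies when fullzones was last cleared */
    };

    struct zonelist {
        struct zonelist_cache *zlcache_ptr;             /* NULL for mempolicy zonelists */
        struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; /* NULL-delimited */
    #ifdef CONFIG_NUMA
        struct zonelist_cache zlcache;                  /* optional cache, at the end */
    #endif
    };
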
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/kernel/
cpuset.c 2327 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
2328 * @zl: the zonelist to be checked
2330 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
2332 int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
2381 * by forcibly using a zonelist starting at a specified node, and by
2383 * any node on the zonelist except the first. By the time any such
2396 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
2470 * by forcibly using a zonelist starting at a specified node, and by
2472 * any node on the zonelist except the first. By the time any such
2547 * the node where the search should start. The zonelist passed
[all...]
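
The hit at 2332 is the function documented at 2327-2330. A minimal sketch of what that check amounts to, reconstructed rather than copied from this tree: walk the NULL-delimited zones[] array and report whether any zone's node appears in current->mems_allowed.

    /* Sketch of the check described above; reconstructed. */
    int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
    {
        int i;

        for (i = 0; zl->zones[i]; i++) {
            int nid = zone_to_nid(zl->zones[i]);

            if (node_isset(nid, current->mems_allowed))
                return 1;   /* at least one usable node */
        }
        return 0;           /* no overlap: the allocation cannot succeed */
    }
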
/netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/mm/
hugetlb.c 71 struct zonelist *zonelist = huge_zonelist(vma, address); local
74 for (z = zonelist->zones; *z; z++) {
mempolicy.c 83 /* Generate a custom zonelist for the BIND policy. */
84 static struct zonelist *bind_zonelist(nodemask_t *nodes)
86 struct zonelist *zl;
146 policy->v.zonelist = bind_zonelist(nodes);
147 if (IS_ERR(policy->v.zonelist)) {
148 void *error_code = policy->v.zonelist;
434 for (i = 0; p->v.zonelist->zones[i]; i++)
435 node_set(zone_to_nid(p->v.zonelist->zones[i]),
1039 /* Return a zonelist representing a mempolicy */
1040 static struct zonelist *zonelist_policy
1598 struct zonelist *zonelist; local
[all...]
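
bind_zonelist() at 84-86 builds the custom MPOL_BIND zonelist the mempolicy.h comments earlier refer to, and the ERR_PTR handling at 146-148 shows how callers consume it. The sketch below gives the idea, simplified from a generic 2.6.2x mm/mempolicy.c (zone ordering and empty-zone filtering may differ here): size the allocation from the nodemask, fill zones[] from the highest zone type downward across all allowed nodes, and NULL-terminate.

    /* Sketch of the BIND-policy zonelist builder; reconstructed. */
    static struct zonelist *bind_zonelist(nodemask_t *nodes)
    {
        struct zonelist *zl;
        enum zone_type k;
        int num = 0, nd;
        /* One slot per (node, zone) pair plus the NULL terminator. */
        int max = MAX_NR_ZONES * nodes_weight(*nodes) + 1;

        zl = kmalloc(offsetof(struct zonelist, zones) +
                     max * sizeof(struct zone *), GFP_KERNEL);
        if (!zl)
            return ERR_PTR(-ENOMEM);
        zl->zlcache_ptr = NULL;     /* mempolicy zonelists carry no cache */

        /* Highest zones from every allowed node first, then the next
         * lower zone type, skipping zones with no memory. */
        for (k = policy_zone; ; k--) {
            for_each_node_mask(nd, *nodes) {
                struct zone *z = &NODE_DATA(nd)->node_zones[k];
                if (z->present_pages > 0)
                    zl->zones[num++] = z;
            }
            if (k == 0)
                break;
        }
        if (num == 0) {
            kfree(zl);
            return ERR_PTR(-EINVAL);
        }
        zl->zones[num] = NULL;
        return zl;
    }
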
oom_kill.c 175 static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask) argument
188 for (z = zonelist->zones; *z; z++)
397 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) argument
424 constraint = constrained_alloc(zonelist, gfp_mask);
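
constrained_alloc() at 175-188 walks the failed allocation's zonelist to decide whether the OOM situation is global or merely the result of a cpuset or MPOL_BIND restriction, and out_of_memory() at 397/424 chooses its victim strategy from that answer. The sketch below shows only the memory-policy half of that classification; the CONSTRAINT_* names follow 2.6-era OOM code but the exact constants and the cpuset test in this tree may differ and are elided here.

    /* Sketch of the classification idea; the cpuset branch is elided
     * (gfp_mask feeds that elided test). */
    enum oom_constraint_sketch {
        CONSTRAINT_NONE,            /* all of system memory was eligible  */
        CONSTRAINT_MEMORY_POLICY,   /* zonelist covered a subset of nodes */
    };

    static int constrained_alloc_sketch(struct zonelist *zonelist, gfp_t gfp_mask)
    {
    #ifdef CONFIG_NUMA
        struct zone **z;
        nodemask_t nodes = node_online_map;

        /* Clear every node the zonelist could have used ... */
        for (z = zonelist->zones; *z; z++)
            node_clear(zone_to_nid(*z), nodes);

        /* ... if any online node is left, the allocation was confined
         * to a subset by a memory policy, so don't kill system-wide. */
        if (!nodes_empty(nodes))
            return CONSTRAINT_MEMORY_POLICY;
    #endif
        return CONSTRAINT_NONE;
    }
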
page_alloc.c 848 static struct page *buffered_rmqueue(struct zonelist *zonelist, argument
881 zone_statistics(zonelist, zone);
1023 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
1026 * comments in mmzone.h. Reduces cache footprint of zonelist scans
1029 * If the zonelist cache is present in the passed in zonelist, then
1033 * If the zonelist cache is not available for this zonelist, does
1036 * If the fullzones BITMAP in the zonelist cache
1044 zlc_setup(struct zonelist *zonelist, int alloc_flags) argument
1086 zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, nodemask_t *allowednodes) argument
1109 zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) argument
1125 zlc_setup(struct zonelist *zonelist, int alloc_flags) argument
1130 zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, nodemask_t *allowednodes) argument
1136 zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) argument
1146 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, int alloc_flags) argument
1219 __alloc_pages(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist) argument
1454 struct zonelist *zonelist = pgdat->node_zonelists + offset; local
1632 build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) argument
1720 struct zonelist *zonelist; local
1770 struct zonelist *zonelist; local
1791 struct zonelist *zonelist; local
[all...]
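
The zlc_* hits at 1023-1146 implement the zonelist cache described by the mmzone.h comments earlier: get_page_from_freelist() consults it so that a zone which recently came up full is skipped cheaply on later scans. The sketch below shows only the shape of that scan; the watermark, cpuset and cache-expiry logic is elided, the zlc_setup()/zlc_zone_worth_trying()/zlc_mark_zone_full() and buffered_rmqueue() signatures match the hits above, and the rest is reconstructed, not quoted.

    /* Shape of the zonelist-cache-aware scan; reconstructed. */
    static struct page *scan_zonelist_sketch(gfp_t gfp_mask, unsigned int order,
                                             struct zonelist *zonelist,
                                             int alloc_flags)
    {
        struct zone **z;
        nodemask_t *allowednodes = NULL;
        int zlc_active = 0;

        for (z = zonelist->zones; *z; z++) {
            struct page *page;

            if (zlc_active &&
                !zlc_zone_worth_trying(zonelist, z, allowednodes))
                continue;   /* cache says this zone was recently full */

            /* Watermark and cpuset checks elided here. */
            page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
            if (page)
                return page;

            /* Zone could not satisfy the request: set the cache up on
             * first failure, then remember the zone as full so later
             * scans skip it without touching its free lists. */
            if (!zlc_active) {
                allowednodes = zlc_setup(zonelist, alloc_flags);
                zlc_active = 1;
            }
            zlc_mark_zone_full(zonelist, z);
        }
        return NULL;
    }
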
slab.c 3207 struct zonelist *zonelist; local
3216 zonelist = &NODE_DATA(slab_node(current->mempolicy))
3225 for (z = zonelist->zones; *z && !obj; z++) {
slub.c 1216 struct zonelist *zonelist; local
1241 zonelist = &NODE_DATA(slab_node(current->mempolicy))
1243 for (z = zonelist->zones; *z; z++) {
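
The slab.c and slub.c hits above show the same NUMA fallback pattern: ask the task's mempolicy for a starting node via slab_node(), take that node's zonelist for the requested GFP zone, and walk it until some node yields memory. Below is a sketch of that pattern only; the try_node callback is a hypothetical stand-in for each allocator's per-node attempt, and the cpuset check each allocator applies per zone is elided.

    /* Common fallback pattern from the slab.c/slub.c hits; reconstructed. */
    static void *fallback_alloc_sketch(gfp_t flags,
                                       void *(*try_node)(int nid, gfp_t flags))
    {
        struct zonelist *zonelist;
        struct zone **z;
        void *obj = NULL;

        /* Start from the node the task's mempolicy prefers for slabs. */
        zonelist = &NODE_DATA(slab_node(current->mempolicy))
                        ->node_zonelists[gfp_zone(flags)];

        /* Walk the zonelist until some node yields an object. */
        for (z = zonelist->zones; *z && !obj; z++)
            obj = try_node(zone_to_nid(*z), flags);

        return obj;
    }
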
vmstat.c 376 * zonelist = the list of zones passed to the allocator
381 void zone_statistics(struct zonelist *zonelist, struct zone *z) argument
383 if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
387 __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
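
zone_statistics() compares the zone a page was actually taken from with zonelist->zones[0], the preferred zone: an allocation satisfied elsewhere charges NUMA_MISS to the zone used and NUMA_FOREIGN to the preferred zone (line 387 above). The sketch below fills out the function as it reads in a generic 2.6.2x tree; only the branch shown at 383-387 is confirmed for this one, the NUMA_LOCAL/NUMA_OTHER tail is assumed.

    /* Sketch; the first branch matches the hits above, the rest is
     * taken from a generic 2.6.2x mm/vmstat.c. */
    void zone_statistics(struct zonelist *zonelist, struct zone *z)
    {
        if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
            __inc_zone_state(z, NUMA_HIT);      /* preferred node satisfied it */
        } else {
            __inc_zone_state(z, NUMA_MISS);     /* had to fall back ...        */
            __inc_zone_state(zonelist->zones[0],
                             NUMA_FOREIGN);     /* ... preferred zone records a
                                                   foreign allocation */
        }
        if (z->zone_pgdat == NODE_DATA(numa_node_id()))
            __inc_zone_state(z, NUMA_LOCAL);    /* allocating CPU's own node */
        else
            __inc_zone_state(z, NUMA_OTHER);
    }
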

Completed in 139 milliseconds