Lines Matching defs:cachep in /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/mm/slab.c (Linux 2.6 SLAB allocator)

248 	struct kmem_cache *cachep;
316 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
318 static int enable_cpucache(struct kmem_cache *cachep);
363 #define MAKE_LIST(cachep, listp, slab, nodeid) \
366 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
369 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
371 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
372 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
373 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
518 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
521 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
523 * cachep->obj_offset: The real object.
524 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
525 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
528 static int obj_offset(struct kmem_cache *cachep)
530 return cachep->obj_offset;
533 static int obj_size(struct kmem_cache *cachep)
535 return cachep->obj_size;
538 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
540 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
541 return (unsigned long long*) (objp + obj_offset(cachep) -
545 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
547 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
548 if (cachep->flags & SLAB_STORE_USER)
549 return (unsigned long long *)(objp + cachep->buffer_size -
552 return (unsigned long long *) (objp + cachep->buffer_size -
556 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
558 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
559 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
565 #define obj_size(cachep) (cachep->buffer_size)
566 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
567 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
568 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
580 * Functions for storing/retrieving the cachep and or slab from the page
754 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
756 return cachep->array[smp_processor_id()];
776 * has cs_{dma,}cachep==NULL. Thus no special case
857 #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
859 static void __slab_error(const char *function, struct kmem_cache *cachep,
863 function, cachep->name, msg);
985 #define drain_alien_cache(cachep, alien) do { } while (0)
986 #define reap_alien(cachep, l3) do { } while (0)
997 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1002 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
1008 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
1057 static void __drain_alien_cache(struct kmem_cache *cachep,
1060 struct kmem_list3 *rl3 = cachep->nodelists[node];
1072 free_block(cachep, ac->entry, ac->avail, node);
1081 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1089 __drain_alien_cache(cachep, ac, node);
1095 static void drain_alien_cache(struct kmem_cache *cachep,
1106 __drain_alien_cache(cachep, ac, i);
1112 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1129 l3 = cachep->nodelists[node];
1130 STATS_INC_NODEFREES(cachep);
1135 STATS_INC_ACOVERFLOW(cachep);
1136 __drain_alien_cache(cachep, alien, nodeid);
1141 spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1142 free_block(cachep, &objp, 1, nodeid);
1143 spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1153 struct kmem_cache *cachep;
1171 list_for_each_entry(cachep, &cache_chain, next) {
1177 if (!cachep->nodelists[node]) {
1183 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1190 cachep->nodelists[node] = l3;
1193 spin_lock_irq(&cachep->nodelists[node]->list_lock);
1194 cachep->nodelists[node]->free_limit =
1196 cachep->batchcount + cachep->num;
1197 spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1204 list_for_each_entry(cachep, &cache_chain, next) {
1209 nc = alloc_arraycache(node, cachep->limit,
1210 cachep->batchcount);
1213 if (cachep->shared) {
1215 cachep->shared * cachep->batchcount,
1221 alien = alloc_alien_cache(node, cachep->limit);
1225 cachep->array[cpu] = nc;
1226 l3 = cachep->nodelists[node];
1284 list_for_each_entry(cachep, &cache_chain, next) {
1292 nc = cachep->array[cpu];
1293 cachep->array[cpu] = NULL;
1294 l3 = cachep->nodelists[node];
1302 l3->free_limit -= cachep->batchcount;
1304 free_block(cachep, nc->entry, nc->avail, node);
1313 free_block(cachep, shared->entry,
1325 drain_alien_cache(cachep, alien);
1336 list_for_each_entry(cachep, &cache_chain, next) {
1337 l3 = cachep->nodelists[node];
1340 drain_freelist(cachep, l3, l3->free_objects);
1359 static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1374 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1375 cachep->nodelists[nodeid] = ptr;
1571 struct kmem_cache *cachep;
1573 list_for_each_entry(cachep, &cache_chain, next)
1574 if (enable_cpucache(cachep))
1618 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1632 flags |= cachep->gfpflags;
1634 page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1638 nr_pages = (1 << cachep->gfporder);
1639 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1653 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1655 unsigned long i = (1 << cachep->gfporder);
1659 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1672 free_pages((unsigned long)addr, cachep->gfporder);
1678 struct kmem_cache *cachep = slab_rcu->cachep;
1680 kmem_freepages(cachep, slab_rcu->addr);
1681 if (OFF_SLAB(cachep))
1682 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1688 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1691 int size = obj_size(cachep);
1693 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1721 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1723 int size = obj_size(cachep);
1724 addr = &((char *)addr)[obj_offset(cachep)];
1764 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1769 if (cachep->flags & SLAB_RED_ZONE) {
1771 *dbg_redzone1(cachep, objp),
1772 *dbg_redzone2(cachep, objp));
1775 if (cachep->flags & SLAB_STORE_USER) {
1777 *dbg_userword(cachep, objp));
1779 (unsigned long)*dbg_userword(cachep, objp));
1782 realobj = (char *)objp + obj_offset(cachep);
1783 size = obj_size(cachep);
1793 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1799 realobj = (char *)objp + obj_offset(cachep);
1800 size = obj_size(cachep);
1813 cachep->name, realobj, size);
1814 print_objinfo(cachep, objp, 0);
1836 objnr = obj_to_index(cachep, slabp, objp);
1838 objp = index_to_obj(cachep, slabp, objnr - 1);
1839 realobj = (char *)objp + obj_offset(cachep);
1842 print_objinfo(cachep, objp, 2);
1844 if (objnr + 1 < cachep->num) {
1845 objp = index_to_obj(cachep, slabp, objnr + 1);
1846 realobj = (char *)objp + obj_offset(cachep);
1849 print_objinfo(cachep, objp, 2);
1858 * @cachep: cache pointer being destroyed
1864 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1867 for (i = 0; i < cachep->num; i++) {
1868 void *objp = index_to_obj(cachep, slabp, i);
1870 if (cachep->flags & SLAB_POISON) {
1872 if (cachep->buffer_size % PAGE_SIZE == 0 &&
1873 OFF_SLAB(cachep))
1875 cachep->buffer_size / PAGE_SIZE, 1);
1877 check_poison_obj(cachep, objp);
1879 check_poison_obj(cachep, objp);
1882 if (cachep->flags & SLAB_RED_ZONE) {
1883 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1884 slab_error(cachep, "start of a freed object "
1886 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1887 slab_error(cachep, "end of a freed object "
1893 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1900 * @cachep: cache pointer being destroyed
1907 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1911 slab_destroy_objs(cachep, slabp);
1912 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1916 slab_rcu->cachep = cachep;
1920 kmem_freepages(cachep, addr);
1921 if (OFF_SLAB(cachep))
1922 kmem_cache_free(cachep->slabp_cache, slabp);
1930 static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1935 cachep->nodelists[node] = &initkmem_list3[index + node];
1936 cachep->nodelists[node]->next_reap = jiffies +
1938 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1942 static void __kmem_cache_destroy(struct kmem_cache *cachep)
1948 kfree(cachep->array[i]);
1952 l3 = cachep->nodelists[i];
1959 kmem_cache_free(&cache_cache, cachep);
1965 * @cachep: pointer to the cache that is being created
1976 static size_t calculate_slab_order(struct kmem_cache *cachep,
2005 cachep->num = num;
2006 cachep->gfporder = gfporder;
2033 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
2036 return enable_cpucache(cachep);
2044 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2051 set_up_list3s(cachep, SIZE_AC);
2057 cachep->array[smp_processor_id()] =
2061 set_up_list3s(cachep, SIZE_L3);
2066 cachep->nodelists[node] =
2069 BUG_ON(!cachep->nodelists[node]);
2070 kmem_list3_init(cachep->nodelists[node]);
2074 cachep->nodelists[numa_node_id()]->next_reap =
2076 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2078 cpu_cache_get(cachep)->avail = 0;
2079 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2080 cpu_cache_get(cachep)->batchcount = 1;
2081 cpu_cache_get(cachep)->touched = 0;
2082 cachep->batchcount = 1;
2083 cachep->limit = BOOT_CPUCACHE_ENTRIES;
2123 struct kmem_cache *cachep = NULL, *pc;
2249 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
2250 if (!cachep)
2254 cachep->obj_size = size;
2262 cachep->obj_offset += sizeof(unsigned long long);
2277 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2278 cachep->obj_offset += PAGE_SIZE - size;
2298 left_over = calculate_slab_order(cachep, size, align, flags);
2300 if (!cachep->num) {
2303 kmem_cache_free(&cache_cache, cachep);
2304 cachep = NULL;
2307 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2322 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2325 cachep->colour_off = cache_line_size();
2327 if (cachep->colour_off < align)
2328 cachep->colour_off = align;
2329 cachep->colour = left_over / cachep->colour_off;
2330 cachep->slab_size = slab_size;
2331 cachep->flags = flags;
2332 cachep->gfpflags = 0;
2334 cachep->gfpflags |= GFP_DMA;
2335 cachep->buffer_size = size;
2336 cachep->reciprocal_buffer_size = reciprocal_value(size);
2339 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2347 BUG_ON(!cachep->slabp_cache);
2349 cachep->ctor = ctor;
2350 cachep->name = name;
2352 if (setup_cpu_cache(cachep)) {
2353 __kmem_cache_destroy(cachep);
2354 cachep = NULL;
2359 list_add(&cachep->next, &cache_chain);
2361 if (!cachep && (flags & SLAB_PANIC))
2365 return cachep;
2380 static void check_spinlock_acquired(struct kmem_cache *cachep)
2384 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
2388 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2392 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2403 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2409 struct kmem_cache *cachep = arg;
2414 ac = cpu_cache_get(cachep);
2415 spin_lock(&cachep->nodelists[node]->list_lock);
2416 free_block(cachep, ac->entry, ac->avail, node);
2417 spin_unlock(&cachep->nodelists[node]->list_lock);
2421 static void drain_cpu_caches(struct kmem_cache *cachep)
2426 on_each_cpu(do_drain, cachep, 1, 1);
2429 l3 = cachep->nodelists[node];
2431 drain_alien_cache(cachep, l3->alien);
2435 l3 = cachep->nodelists[node];
2437 drain_array(cachep, l3, l3->shared, 1, node);
2483 static int __cache_shrink(struct kmem_cache *cachep)
2488 drain_cpu_caches(cachep);
2492 l3 = cachep->nodelists[i];
2496 drain_freelist(cachep, l3, l3->free_objects);
2506 * @cachep: The cache to shrink.
2511 int kmem_cache_shrink(struct kmem_cache *cachep)
2514 BUG_ON(!cachep || in_interrupt());
2517 ret = __cache_shrink(cachep);
2525 * @cachep: the cache to destroy
2539 void kmem_cache_destroy(struct kmem_cache *cachep)
2541 BUG_ON(!cachep || in_interrupt());
2548 list_del(&cachep->next);
2549 if (__cache_shrink(cachep)) {
2550 slab_error(cachep, "Can't free all objects");
2551 list_add(&cachep->next, &cache_chain);
2556 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2559 __kmem_cache_destroy(cachep);
2575 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2581 if (OFF_SLAB(cachep)) {
2583 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2589 colour_off += cachep->slab_size;
2603 static void cache_init_objs(struct kmem_cache *cachep,
2608 for (i = 0; i < cachep->num; i++) {
2609 void *objp = index_to_obj(cachep, slabp, i);
2612 if (cachep->flags & SLAB_POISON)
2613 poison_obj(cachep, objp, POISON_FREE);
2614 if (cachep->flags & SLAB_STORE_USER)
2615 *dbg_userword(cachep, objp) = NULL;
2617 if (cachep->flags & SLAB_RED_ZONE) {
2618 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2619 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2626 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2627 cachep->ctor(objp + obj_offset(cachep), cachep,
2630 if (cachep->flags & SLAB_RED_ZONE) {
2631 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2632 slab_error(cachep, "constructor overwrote the"
2634 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2635 slab_error(cachep, "constructor overwrote the"
2638 if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2639 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2641 cachep->buffer_size / PAGE_SIZE, 0);
2643 if (cachep->ctor)
2644 cachep->ctor(objp, cachep, 0);
2652 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2656 BUG_ON(!(cachep->gfpflags & GFP_DMA));
2658 BUG_ON(cachep->gfpflags & GFP_DMA);
2662 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2665 void *objp = index_to_obj(cachep, slabp, slabp->free);
2679 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2682 unsigned int objnr = obj_to_index(cachep, slabp, objp);
2690 "'%s', objp %p\n", cachep->name, objp);
2727 static int cache_grow(struct kmem_cache *cachep,
2744 l3 = cachep->nodelists[nodeid];
2750 if (l3->colour_next >= cachep->colour)
2754 offset *= cachep->colour_off;
2765 kmem_flagcheck(cachep, flags);
2772 objp = kmem_getpages(cachep, flags, nodeid);
2777 slabp = alloc_slabmgmt(cachep, objp, offset,
2783 slab_map_pages(cachep, slabp, objp);
2785 cache_init_objs(cachep, slabp);
2794 STATS_INC_GROWN(cachep);
2795 l3->free_objects += cachep->num;
2799 kmem_freepages(cachep, objp);
2844 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2851 objp -= obj_offset(cachep);
2857 if (cachep->flags & SLAB_RED_ZONE) {
2858 verify_redzone_free(cachep, objp);
2859 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2860 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2862 if (cachep->flags & SLAB_STORE_USER)
2863 *dbg_userword(cachep, objp) = caller;
2865 objnr = obj_to_index(cachep, slabp, objp);
2867 BUG_ON(objnr >= cachep->num);
2868 BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2873 if (cachep->flags & SLAB_POISON) {
2875 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2876 store_stackinfo(cachep, objp, (unsigned long)caller);
2878 cachep->buffer_size / PAGE_SIZE, 0);
2880 poison_obj(cachep, objp, POISON_FREE);
2883 poison_obj(cachep, objp, POISON_FREE);
2889 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2897 if (entries > cachep->num || i >= cachep->num)
2900 if (entries != cachep->num - slabp->inuse) {
2904 cachep->name, cachep->num, slabp, slabp->inuse);
2906 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2922 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2932 ac = cpu_cache_get(cachep);
2943 l3 = cachep->nodelists[node];
2965 check_slabp(cachep, slabp);
2966 check_spinlock_acquired(cachep);
2973 BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
2975 while (slabp->inuse < cachep->num && batchcount--) {
2976 STATS_INC_ALLOCED(cachep);
2977 STATS_INC_ACTIVE(cachep);
2978 STATS_SET_HIGH(cachep);
2980 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
2983 check_slabp(cachep, slabp);
3000 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3003 ac = cpu_cache_get(cachep);
3014 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3019 kmem_flagcheck(cachep, flags);
3024 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3029 if (cachep->flags & SLAB_POISON) {
3031 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3033 cachep->buffer_size / PAGE_SIZE, 1);
3035 check_poison_obj(cachep, objp);
3037 check_poison_obj(cachep, objp);
3039 poison_obj(cachep, objp, POISON_INUSE);
3041 if (cachep->flags & SLAB_STORE_USER)
3042 *dbg_userword(cachep, objp) = caller;
3044 if (cachep->flags & SLAB_RED_ZONE) {
3045 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3046 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3047 slab_error(cachep, "double free, or memory outside"
3051 objp, *dbg_redzone1(cachep, objp),
3052 *dbg_redzone2(cachep, objp));
3054 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3055 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3063 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3067 objp += obj_offset(cachep);
3068 if (cachep->ctor && cachep->flags & SLAB_POISON)
3069 cachep->ctor(objp, cachep, 0);
3104 static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3106 if (cachep == &cache_cache)
3113 return should_fail(&failslab.attr, obj_size(cachep));
3148 static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
3155 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3162 ac = cpu_cache_get(cachep);
3164 STATS_INC_ALLOCHIT(cachep);
3168 STATS_INC_ALLOCMISS(cachep);
3169 objp = cache_alloc_refill(cachep, flags);
3181 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3188 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3193 return ____cache_alloc_node(cachep, flags, nid_alloc);
3275 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3284 l3 = cachep->nodelists[nodeid];
3299 check_spinlock_acquired_node(cachep, nodeid);
3300 check_slabp(cachep, slabp);
3302 STATS_INC_NODEALLOCS(cachep);
3303 STATS_INC_ACTIVE(cachep);
3304 STATS_SET_HIGH(cachep);
3306 BUG_ON(slabp->inuse == cachep->num);
3308 obj = slab_get_obj(cachep, slabp, nodeid);
3309 check_slabp(cachep, slabp);
3324 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3328 return fallback_alloc(cachep, flags);
3336 * @cachep: The cache to allocate from.
3347 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3353 if (should_failslab(cachep, flags))
3356 cache_alloc_debugcheck_before(cachep, flags);
3362 if (unlikely(!cachep->nodelists[nodeid])) {
3364 ptr = fallback_alloc(cachep, flags);
3375 ptr = ____cache_alloc(cachep, flags);
3380 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3383 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3413 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3415 return ____cache_alloc(cachep, flags);
3421 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3426 if (should_failslab(cachep, flags))
3429 cache_alloc_debugcheck_before(cachep, flags);
3431 objp = __do_cache_alloc(cachep, flags);
3433 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3442 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3453 l3 = cachep->nodelists[node];
3455 check_spinlock_acquired_node(cachep, node);
3456 check_slabp(cachep, slabp);
3457 slab_put_obj(cachep, slabp, objp, node);
3458 STATS_DEC_ACTIVE(cachep);
3460 check_slabp(cachep, slabp);
3465 l3->free_objects -= cachep->num;
3472 slab_destroy(cachep, slabp);
3486 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3497 l3 = cachep->nodelists[node];
3512 free_block(cachep, ac->entry, batchcount, node);
3529 STATS_SET_FREEABLE(cachep, i);
3541 static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3543 struct array_cache *ac = cpu_cache_get(cachep);
3546 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3548 if (cache_free_alien(cachep, objp))
3552 STATS_INC_FREEHIT(cachep);
3556 STATS_INC_FREEMISS(cachep);
3557 cache_flusharray(cachep, ac);
3564 * @cachep: The cache to allocate from.
3570 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3572 return __cache_alloc(cachep, flags, __builtin_return_address(0));
3596 * @cachep: the cache we're checking against
3607 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3612 unsigned long size = cachep->buffer_size;
3628 if (unlikely(page_get_cache(page) != cachep))
3636 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3638 return __cache_alloc_node(cachep, flags, nodeid,
3646 struct kmem_cache *cachep;
3648 cachep = kmem_find_general_cachep(size, flags);
3649 if (unlikely(cachep == NULL))
3651 return kmem_cache_alloc_node(cachep, flags, node);
3686 struct kmem_cache *cachep;
3693 cachep = __find_general_cachep(size, flags);
3694 if (unlikely(cachep == NULL))
3696 return __cache_alloc(cachep, flags, caller);
3769 * @cachep: The cache the allocation was from.
3775 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3779 BUG_ON(virt_to_cache(objp) != cachep);
3782 debug_check_no_locks_freed(objp, obj_size(cachep));
3783 __cache_free(cachep, objp);
3813 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3815 return obj_size(cachep);
3819 const char *kmem_cache_name(struct kmem_cache *cachep)
3821 return cachep->name;
3828 static int alloc_kmemlist(struct kmem_cache *cachep)
3838 new_alien = alloc_alien_cache(node, cachep->limit);
3844 if (cachep->shared) {
3846 cachep->shared*cachep->batchcount,
3854 l3 = cachep->nodelists[node];
3861 free_block(cachep, shared->entry,
3870 cachep->batchcount + cachep->num;
3885 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3889 cachep->batchcount + cachep->num;
3890 cachep->nodelists[node] = l3;
3895 if (!cachep->next.next) {
3899 if (cachep->nodelists[node]) {
3900 l3 = cachep->nodelists[node];
3905 cachep->nodelists[node] = NULL;
3914 struct kmem_cache *cachep;
3924 old = cpu_cache_get(new->cachep);
3926 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3931 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3951 new->cachep = cachep;
3956 cachep->batchcount = batchcount;
3957 cachep->limit = limit;
3958 cachep->shared = shared;
3964 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3965 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3966 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3970 return alloc_kmemlist(cachep);
3974 static int enable_cpucache(struct kmem_cache *cachep)
3988 if (cachep->buffer_size > 131072)
3990 else if (cachep->buffer_size > PAGE_SIZE)
3992 else if (cachep->buffer_size > 1024)
3994 else if (cachep->buffer_size > 256)
4009 if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
4020 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
4023 cachep->name, -err);
4032 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4047 free_block(cachep, ac->entry, tofree, node);
4169 struct kmem_cache *cachep = p;
4171 return cachep->next.next == &cache_chain ?
4172 NULL : list_entry(cachep->next.next, struct kmem_cache, next);
4182 struct kmem_cache *cachep = p;
4196 l3 = cachep->nodelists[node];
4204 if (slabp->inuse != cachep->num && !error)
4206 active_objs += cachep->num;
4210 if (slabp->inuse == cachep->num && !error)
4229 num_objs = num_slabs * cachep->num;
4233 name = cachep->name;
4238 name, active_objs, num_objs, cachep->buffer_size,
4239 cachep->num, (1 << cachep->gfporder));
4241 cachep->limit, cachep->batchcount, cachep->shared);
4246 unsigned long high = cachep->high_mark;
4247 unsigned long allocs = cachep->num_allocations;
4248 unsigned long grown = cachep->grown;
4249 unsigned long reaped = cachep->reaped;
4250 unsigned long errors = cachep->errors;
4251 unsigned long max_freeable = cachep->max_freeable;
4252 unsigned long node_allocs = cachep->node_allocs;
4253 unsigned long node_frees = cachep->node_frees;
4254 unsigned long overflows = cachep->node_overflow;
4263 unsigned long allochit = atomic_read(&cachep->allochit);
4264 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4265 unsigned long freehit = atomic_read(&cachep->freehit);
4266 unsigned long freemiss = atomic_read(&cachep->freemiss);
4310 struct kmem_cache *cachep;
4329 list_for_each_entry(cachep, &cache_chain, next) {
4330 if (!strcmp(cachep->name, kbuf)) {
4335 res = do_tune_cpucache(cachep, limit,
4426 struct kmem_cache *cachep = p;
4434 if (!(cachep->flags & SLAB_STORE_USER))
4436 if (!(cachep->flags & SLAB_RED_ZONE))
4444 l3 = cachep->nodelists[node];
4452 handle_slab(n, cachep, slabp);
4454 handle_slab(n, cachep, slabp);
4457 name = cachep->name;
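
Most of the definitions matched above are internals of the SLAB allocator, but a handful of them (kmem_cache_alloc, kmem_cache_alloc_node, kmem_cache_free, kmem_cache_shrink, kmem_cache_destroy, kmem_cache_size, kmem_cache_name) are the exported cache API that the rest of the kernel calls into. As a rough reference for how that API is driven, here is a minimal sketch of a hypothetical module built against a kernel of this vintage; struct foo, foo_cache and the surrounding code are invented for illustration, and the kmem_cache_create() prototype is assumed to be the 2.6.22-era one that still takes constructor and destructor callbacks.

/*
 * Minimal, hypothetical consumer of the kmem_cache API defined in
 * mm/slab.c.  Assumes a 2.6.22-era kmem_cache_create() that still
 * accepts constructor and destructor callbacks (both NULL here).
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {				/* invented object type */
	int id;
	char payload[120];
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	struct foo *f;

	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!foo_cachep)
		return -ENOMEM;

	/* One alloc/free round trip through the per-CPU array_cache fast path. */
	f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
	if (!f) {
		kmem_cache_destroy(foo_cachep);
		return -ENOMEM;
	}
	f->id = 1;
	kmem_cache_free(foo_cachep, f);

	return 0;
}

static void __exit foo_exit(void)
{
	/*
	 * All objects must already be freed; otherwise kmem_cache_destroy()
	 * reports "Can't free all objects" via slab_error() (source line
	 * 2550 in the listing above).
	 */
	kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");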