Lines matching refs:cachep (mm/slab.c)

227 struct kmem_cache *cachep;
294 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
296 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
341 #define MAKE_LIST(cachep, listp, slab, nodeid) \
344 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
347 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
349 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
350 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
351 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
415 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
418 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
420 * cachep->obj_offset: The real object.
421 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
422 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
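
The comment block above (slab.c lines 415-422) describes where the leading red zone, the real object, the trailing red zone, and the last-caller word sit inside each buffer when debugging is enabled. As a minimal userspace sketch of that arithmetic only: the names below (struct toy_cache and the offset helpers) are hypothetical, the layout assumed is the SLAB_STORE_USER case from the comment, and the extra REDZONE_ALIGN padding the real dbg_redzone2() applies is ignored.

/* Toy illustration of the debug object layout; not kernel code. */
#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_WORD	sizeof(void *)

/* Hypothetical, cut-down stand-in for the debug fields of struct kmem_cache. */
struct toy_cache {
	size_t obj_offset;	/* where the real object starts              */
	size_t obj_size;	/* size the caller asked for                 */
	size_t buffer_size;	/* full per-object footprint inside the slab */
};

/* Word just before the object: first red zone. */
static size_t redzone1_off(const struct toy_cache *c)
{
	return c->obj_offset - BYTES_PER_WORD;
}

/* Second red zone, two words from the end of the buffer. */
static size_t redzone2_off(const struct toy_cache *c)
{
	return c->buffer_size - 2 * BYTES_PER_WORD;
}

/* Last word of the buffer: address of the last caller. */
static size_t userword_off(const struct toy_cache *c)
{
	return c->buffer_size - BYTES_PER_WORD;
}

int main(void)
{
	/* Example: a 64-byte object with one leading and two trailing words. */
	struct toy_cache c = {
		.obj_offset  = BYTES_PER_WORD,
		.obj_size    = 64,
		.buffer_size = BYTES_PER_WORD + 64 + 2 * BYTES_PER_WORD,
	};

	printf("redzone1 at %zu, object at %zu..%zu, redzone2 at %zu, last caller at %zu\n",
	       redzone1_off(&c), c.obj_offset, c.obj_offset + c.obj_size - 1,
	       redzone2_off(&c), userword_off(&c));
	return 0;
}

The accessors excerpted next (obj_offset(), dbg_redzone1(), dbg_redzone2(), dbg_userword()) perform the same offset calculations on the real struct kmem_cache.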
425 static int obj_offset(struct kmem_cache *cachep)
427 return cachep->obj_offset;
430 static int obj_size(struct kmem_cache *cachep)
432 return cachep->obj_size;
435 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
437 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
438 return (unsigned long long*) (objp + obj_offset(cachep) -
442 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
444 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
445 if (cachep->flags & SLAB_STORE_USER)
446 return (unsigned long long *)(objp + cachep->buffer_size -
449 return (unsigned long long *) (objp + cachep->buffer_size -
453 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
455 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
456 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
462 #define obj_size(cachep) (cachep->buffer_size)
463 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
464 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
465 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
470 size_t slab_buffer_size(struct kmem_cache *cachep)
472 return cachep->buffer_size;
485 * Functions for storing/retrieving the cachep and or slab from the page
671 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
673 return cachep->array[smp_processor_id()];
696 * has cs_{dma,}cachep==NULL. Thus no special case
777 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
779 static void __slab_error(const char *function, struct kmem_cache *cachep,
783 function, cachep->name, msg);
912 #define drain_alien_cache(cachep, alien) do { } while (0)
913 #define reap_alien(cachep, l3) do { } while (0)
924 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
929 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
935 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
982 static void __drain_alien_cache(struct kmem_cache *cachep,
985 struct kmem_list3 *rl3 = cachep->nodelists[node];
997 free_block(cachep, ac->entry, ac->avail, node);
1006 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1014 __drain_alien_cache(cachep, ac, node);
1020 static void drain_alien_cache(struct kmem_cache *cachep,
1031 __drain_alien_cache(cachep, ac, i);
1037 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1054 l3 = cachep->nodelists[node];
1055 STATS_INC_NODEFREES(cachep);
1060 STATS_INC_ACOVERFLOW(cachep);
1061 __drain_alien_cache(cachep, alien, nodeid);
1066 spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1067 free_block(cachep, &objp, 1, nodeid);
1068 spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1085 struct kmem_cache *cachep;
1089 list_for_each_entry(cachep, &cache_chain, next) {
1095 if (!cachep->nodelists[node]) {
1101 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1108 cachep->nodelists[node] = l3;
1111 spin_lock_irq(&cachep->nodelists[node]->list_lock);
1112 cachep->nodelists[node]->free_limit =
1114 cachep->batchcount + cachep->num;
1115 spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1122 struct kmem_cache *cachep;
1127 list_for_each_entry(cachep, &cache_chain, next) {
1133 nc = cachep->array[cpu];
1134 cachep->array[cpu] = NULL;
1135 l3 = cachep->nodelists[node];
1143 l3->free_limit -= cachep->batchcount;
1145 free_block(cachep, nc->entry, nc->avail, node);
1154 free_block(cachep, shared->entry,
1166 drain_alien_cache(cachep, alien);
1177 list_for_each_entry(cachep, &cache_chain, next) {
1178 l3 = cachep->nodelists[node];
1181 drain_freelist(cachep, l3, l3->free_objects);
1187 struct kmem_cache *cachep;
1206 list_for_each_entry(cachep, &cache_chain, next) {
1211 nc = alloc_arraycache(node, cachep->limit,
1212 cachep->batchcount, GFP_KERNEL);
1215 if (cachep->shared) {
1217 cachep->shared * cachep->batchcount,
1225 alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
1232 cachep->array[cpu] = nc;
1233 l3 = cachep->nodelists[node];
1333 struct kmem_cache *cachep;
1336 list_for_each_entry(cachep, &cache_chain, next) {
1339 l3 = cachep->nodelists[node];
1343 drain_freelist(cachep, l3, l3->free_objects);
1390 static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1404 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1405 cachep->nodelists[nodeid] = ptr;
1412 static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1417 cachep->nodelists[node] = &initkmem_list3[index + node];
1418 cachep->nodelists[node]->next_reap = jiffies +
1420 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1615 struct kmem_cache *cachep;
1619 list_for_each_entry(cachep, &cache_chain, next)
1620 if (enable_cpucache(cachep, GFP_NOWAIT))
1670 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1684 flags |= cachep->gfpflags;
1685 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1688 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1692 nr_pages = (1 << cachep->gfporder);
1693 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1702 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1703 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1705 if (cachep->ctor)
1717 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1719 unsigned long i = (1 << cachep->gfporder);
1723 kmemcheck_free_shadow(page, cachep->gfporder);
1725 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1738 free_pages((unsigned long)addr, cachep->gfporder);
1744 struct kmem_cache *cachep = slab_rcu->cachep;
1746 kmem_freepages(cachep, slab_rcu->addr);
1747 if (OFF_SLAB(cachep))
1748 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1754 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1757 int size = obj_size(cachep);
1759 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1787 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1789 int size = obj_size(cachep);
1790 addr = &((char *)addr)[obj_offset(cachep)];
1830 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1835 if (cachep->flags & SLAB_RED_ZONE) {
1837 *dbg_redzone1(cachep, objp),
1838 *dbg_redzone2(cachep, objp));
1841 if (cachep->flags & SLAB_STORE_USER) {
1843 *dbg_userword(cachep, objp));
1845 (unsigned long)*dbg_userword(cachep, objp));
1848 realobj = (char *)objp + obj_offset(cachep);
1849 size = obj_size(cachep);
1859 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1865 realobj = (char *)objp + obj_offset(cachep);
1866 size = obj_size(cachep);
1879 cachep->name, realobj, size);
1880 print_objinfo(cachep, objp, 0);
1902 objnr = obj_to_index(cachep, slabp, objp);
1904 objp = index_to_obj(cachep, slabp, objnr - 1);
1905 realobj = (char *)objp + obj_offset(cachep);
1908 print_objinfo(cachep, objp, 2);
1910 if (objnr + 1 < cachep->num) {
1911 objp = index_to_obj(cachep, slabp, objnr + 1);
1912 realobj = (char *)objp + obj_offset(cachep);
1915 print_objinfo(cachep, objp, 2);
1922 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1925 for (i = 0; i < cachep->num; i++) {
1926 void *objp = index_to_obj(cachep, slabp, i);
1928 if (cachep->flags & SLAB_POISON) {
1930 if (cachep->buffer_size % PAGE_SIZE == 0 &&
1931 OFF_SLAB(cachep))
1933 cachep->buffer_size / PAGE_SIZE, 1);
1935 check_poison_obj(cachep, objp);
1937 check_poison_obj(cachep, objp);
1940 if (cachep->flags & SLAB_RED_ZONE) {
1941 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1942 slab_error(cachep, "start of a freed object "
1944 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1945 slab_error(cachep, "end of a freed object "
1951 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1958 * @cachep: cache pointer being destroyed
1965 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1969 slab_destroy_debugcheck(cachep, slabp);
1970 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1974 slab_rcu->cachep = cachep;
1978 kmem_freepages(cachep, addr);
1979 if (OFF_SLAB(cachep))
1980 kmem_cache_free(cachep->slabp_cache, slabp);
1984 static void __kmem_cache_destroy(struct kmem_cache *cachep)
1990 kfree(cachep->array[i]);
1994 l3 = cachep->nodelists[i];
2001 kmem_cache_free(&cache_cache, cachep);
2007 * @cachep: pointer to the cache that is being created
2018 static size_t calculate_slab_order(struct kmem_cache *cachep,
2047 cachep->num = num;
2048 cachep->gfporder = gfporder;
2075 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2078 return enable_cpucache(cachep, gfp);
2086 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2093 set_up_list3s(cachep, SIZE_AC);
2099 cachep->array[smp_processor_id()] =
2103 set_up_list3s(cachep, SIZE_L3);
2108 cachep->nodelists[node] =
2111 BUG_ON(!cachep->nodelists[node]);
2112 kmem_list3_init(cachep->nodelists[node]);
2116 cachep->nodelists[numa_mem_id()]->next_reap =
2118 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2120 cpu_cache_get(cachep)->avail = 0;
2121 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2122 cpu_cache_get(cachep)->batchcount = 1;
2123 cpu_cache_get(cachep)->touched = 0;
2124 cachep->batchcount = 1;
2125 cachep->limit = BOOT_CPUCACHE_ENTRIES;
2163 struct kmem_cache *cachep = NULL, *pc;
2298 cachep = kmem_cache_zalloc(&cache_cache, gfp);
2299 if (!cachep)
2303 cachep->obj_size = size;
2311 cachep->obj_offset += align;
2326 && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
2327 cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
2349 left_over = calculate_slab_order(cachep, size, align, flags);
2351 if (!cachep->num) {
2354 kmem_cache_free(&cache_cache, cachep);
2355 cachep = NULL;
2358 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2373 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2385 cachep->colour_off = cache_line_size();
2387 if (cachep->colour_off < align)
2388 cachep->colour_off = align;
2389 cachep->colour = left_over / cachep->colour_off;
2390 cachep->slab_size = slab_size;
2391 cachep->flags = flags;
2392 cachep->gfpflags = 0;
2394 cachep->gfpflags |= GFP_DMA;
2395 cachep->buffer_size = size;
2396 cachep->reciprocal_buffer_size = reciprocal_value(size);
2399 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2407 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2409 cachep->ctor = ctor;
2410 cachep->name = name;
2412 if (setup_cpu_cache(cachep, gfp)) {
2413 __kmem_cache_destroy(cachep);
2414 cachep = NULL;
2419 list_add(&cachep->next, &cache_chain);
2421 if (!cachep && (flags & SLAB_PANIC))
2428 return cachep;
2443 static void check_spinlock_acquired(struct kmem_cache *cachep)
2447 assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
2451 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2455 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2466 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2472 struct kmem_cache *cachep = arg;
2477 ac = cpu_cache_get(cachep);
2478 spin_lock(&cachep->nodelists[node]->list_lock);
2479 free_block(cachep, ac->entry, ac->avail, node);
2480 spin_unlock(&cachep->nodelists[node]->list_lock);
2484 static void drain_cpu_caches(struct kmem_cache *cachep)
2489 on_each_cpu(do_drain, cachep, 1);
2492 l3 = cachep->nodelists[node];
2494 drain_alien_cache(cachep, l3->alien);
2498 l3 = cachep->nodelists[node];
2500 drain_array(cachep, l3, l3->shared, 1, node);
2546 static int __cache_shrink(struct kmem_cache *cachep)
2551 drain_cpu_caches(cachep);
2555 l3 = cachep->nodelists[i];
2559 drain_freelist(cachep, l3, l3->free_objects);
2569 * @cachep: The cache to shrink.
2574 int kmem_cache_shrink(struct kmem_cache *cachep)
2577 BUG_ON(!cachep || in_interrupt());
2581 ret = __cache_shrink(cachep);
2590 * @cachep: the cache to destroy
2604 void kmem_cache_destroy(struct kmem_cache *cachep)
2606 BUG_ON(!cachep || in_interrupt());
2614 list_del(&cachep->next);
2615 if (__cache_shrink(cachep)) {
2616 slab_error(cachep, "Can't free all objects");
2617 list_add(&cachep->next, &cache_chain);
2623 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2626 __kmem_cache_destroy(cachep);
2643 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2649 if (OFF_SLAB(cachep)) {
2651 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2665 colour_off += cachep->slab_size;
2680 static void cache_init_objs(struct kmem_cache *cachep,
2685 for (i = 0; i < cachep->num; i++) {
2686 void *objp = index_to_obj(cachep, slabp, i);
2689 if (cachep->flags & SLAB_POISON)
2690 poison_obj(cachep, objp, POISON_FREE);
2691 if (cachep->flags & SLAB_STORE_USER)
2692 *dbg_userword(cachep, objp) = NULL;
2694 if (cachep->flags & SLAB_RED_ZONE) {
2695 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2696 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2703 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2704 cachep->ctor(objp + obj_offset(cachep));
2706 if (cachep->flags & SLAB_RED_ZONE) {
2707 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2708 slab_error(cachep, "constructor overwrote the"
2710 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2711 slab_error(cachep, "constructor overwrote the"
2714 if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2715 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2717 cachep->buffer_size / PAGE_SIZE, 0);
2719 if (cachep->ctor)
2720 cachep->ctor(objp);
2727 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2731 BUG_ON(!(cachep->gfpflags & GFP_DMA));
2733 BUG_ON(cachep->gfpflags & GFP_DMA);
2737 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2740 void *objp = index_to_obj(cachep, slabp, slabp->free);
2754 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2757 unsigned int objnr = obj_to_index(cachep, slabp, objp);
2765 "'%s', objp %p\n", cachep->name, objp);
2802 static int cache_grow(struct kmem_cache *cachep,
2819 l3 = cachep->nodelists[nodeid];
2825 if (l3->colour_next >= cachep->colour)
2829 offset *= cachep->colour_off;
2840 kmem_flagcheck(cachep, flags);
2847 objp = kmem_getpages(cachep, local_flags, nodeid);
2852 slabp = alloc_slabmgmt(cachep, objp, offset,
2857 slab_map_pages(cachep, slabp, objp);
2859 cache_init_objs(cachep, slabp);
2868 STATS_INC_GROWN(cachep);
2869 l3->free_objects += cachep->num;
2873 kmem_freepages(cachep, objp);
2918 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2925 BUG_ON(virt_to_cache(objp) != cachep);
2927 objp -= obj_offset(cachep);
2933 if (cachep->flags & SLAB_RED_ZONE) {
2934 verify_redzone_free(cachep, objp);
2935 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2936 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2938 if (cachep->flags & SLAB_STORE_USER)
2939 *dbg_userword(cachep, objp) = caller;
2941 objnr = obj_to_index(cachep, slabp, objp);
2943 BUG_ON(objnr >= cachep->num);
2944 BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2949 if (cachep->flags & SLAB_POISON) {
2951 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2952 store_stackinfo(cachep, objp, (unsigned long)caller);
2954 cachep->buffer_size / PAGE_SIZE, 0);
2956 poison_obj(cachep, objp, POISON_FREE);
2959 poison_obj(cachep, objp, POISON_FREE);
2965 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2973 if (entries > cachep->num || i >= cachep->num)
2976 if (entries != cachep->num - slabp->inuse) {
2980 cachep->name, cachep->num, slabp, slabp->inuse);
2982 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2998 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
3008 ac = cpu_cache_get(cachep);
3018 l3 = cachep->nodelists[node];
3042 check_slabp(cachep, slabp);
3043 check_spinlock_acquired(cachep);
3050 BUG_ON(slabp->inuse >= cachep->num);
3052 while (slabp->inuse < cachep->num && batchcount--) {
3053 STATS_INC_ALLOCED(cachep);
3054 STATS_INC_ACTIVE(cachep);
3055 STATS_SET_HIGH(cachep);
3057 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
3060 check_slabp(cachep, slabp);
3077 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3080 ac = cpu_cache_get(cachep);
3091 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3096 kmem_flagcheck(cachep, flags);
3101 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3106 if (cachep->flags & SLAB_POISON) {
3108 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3110 cachep->buffer_size / PAGE_SIZE, 1);
3112 check_poison_obj(cachep, objp);
3114 check_poison_obj(cachep, objp);
3116 poison_obj(cachep, objp, POISON_INUSE);
3118 if (cachep->flags & SLAB_STORE_USER)
3119 *dbg_userword(cachep, objp) = caller;
3121 if (cachep->flags & SLAB_RED_ZONE) {
3122 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3123 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3124 slab_error(cachep, "double free, or memory outside"
3128 objp, *dbg_redzone1(cachep, objp),
3129 *dbg_redzone2(cachep, objp));
3131 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3132 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3140 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3144 objp += obj_offset(cachep);
3145 if (cachep->ctor && cachep->flags & SLAB_POISON)
3146 cachep->ctor(objp);
3159 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3161 if (cachep == &cache_cache)
3164 return should_failslab(obj_size(cachep), flags, cachep->flags);
3167 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3174 ac = cpu_cache_get(cachep);
3176 STATS_INC_ALLOCHIT(cachep);
3180 STATS_INC_ALLOCMISS(cachep);
3181 objp = cache_alloc_refill(cachep, flags);
3186 ac = cpu_cache_get(cachep);
3205 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3213 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3219 return ____cache_alloc_node(cachep, flags, nid_alloc);
3307 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3316 l3 = cachep->nodelists[nodeid];
3331 check_spinlock_acquired_node(cachep, nodeid);
3332 check_slabp(cachep, slabp);
3334 STATS_INC_NODEALLOCS(cachep);
3335 STATS_INC_ACTIVE(cachep);
3336 STATS_SET_HIGH(cachep);
3338 BUG_ON(slabp->inuse == cachep->num);
3340 obj = slab_get_obj(cachep, slabp, nodeid);
3341 check_slabp(cachep, slabp);
3356 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3360 return fallback_alloc(cachep, flags);
3368 * @cachep: The cache to allocate from.
3379 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3390 if (slab_should_failslab(cachep, flags))
3393 cache_alloc_debugcheck_before(cachep, flags);
3399 if (unlikely(!cachep->nodelists[nodeid])) {
3401 ptr = fallback_alloc(cachep, flags);
3412 ptr = ____cache_alloc(cachep, flags);
3417 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3420 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3421 kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
3425 kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
3428 memset(ptr, 0, obj_size(cachep));
3458 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3460 return ____cache_alloc(cachep, flags);
3466 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3475 if (slab_should_failslab(cachep, flags))
3478 cache_alloc_debugcheck_before(cachep, flags);
3480 objp = __do_cache_alloc(cachep, flags);
3482 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3483 kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
3488 kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
3491 memset(objp, 0, obj_size(cachep));
3499 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3510 l3 = cachep->nodelists[node];
3512 check_spinlock_acquired_node(cachep, node);
3513 check_slabp(cachep, slabp);
3514 slab_put_obj(cachep, slabp, objp, node);
3515 STATS_DEC_ACTIVE(cachep);
3517 check_slabp(cachep, slabp);
3522 l3->free_objects -= cachep->num;
3529 slab_destroy(cachep, slabp);
3543 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3554 l3 = cachep->nodelists[node];
3569 free_block(cachep, ac->entry, batchcount, node);
3586 STATS_SET_FREEABLE(cachep, i);
3598 static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3600 struct array_cache *ac = cpu_cache_get(cachep);
3603 kmemleak_free_recursive(objp, cachep->flags);
3604 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3606 kmemcheck_slab_free(cachep, objp, obj_size(cachep));
3615 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3619 STATS_INC_FREEHIT(cachep);
3623 STATS_INC_FREEMISS(cachep);
3624 cache_flusharray(cachep, ac);
3631 * @cachep: The cache to allocate from.
3637 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3639 void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3642 obj_size(cachep), cachep->buffer_size, flags);
3649 void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
3651 return __cache_alloc(cachep, flags, __builtin_return_address(0));
3658 * @cachep: the cache we're checking against
3669 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3671 unsigned long size = cachep->buffer_size;
3679 if (unlikely(page_get_cache(page) != cachep))
3687 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3689 void *ret = __cache_alloc_node(cachep, flags, nodeid,
3693 obj_size(cachep), cachep->buffer_size,
3701 void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
3705 return __cache_alloc_node(cachep, flags, nodeid,
3714 struct kmem_cache *cachep;
3717 cachep = kmem_find_general_cachep(size, flags);
3718 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3719 return cachep;
3720 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
3723 size, cachep->buffer_size, flags, node);
3760 struct kmem_cache *cachep;
3768 cachep = __find_general_cachep(size, flags);
3769 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3770 return cachep;
3771 ret = __cache_alloc(cachep, flags, caller);
3774 size, cachep->buffer_size, flags);
3803 * @cachep: The cache the allocation was from.
3809 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3814 debug_check_no_locks_freed(objp, obj_size(cachep));
3815 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3816 debug_check_no_obj_freed(objp, obj_size(cachep));
3817 __cache_free(cachep, objp);
3852 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3854 return obj_size(cachep);
3858 const char *kmem_cache_name(struct kmem_cache *cachep)
3860 return cachep->name;
3867 static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
3877 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3883 if (cachep->shared) {
3885 cachep->shared*cachep->batchcount,
3893 l3 = cachep->nodelists[node];
3900 free_block(cachep, shared->entry,
3909 cachep->batchcount + cachep->num;
3924 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3928 cachep->batchcount + cachep->num;
3929 cachep->nodelists[node] = l3;
3934 if (!cachep->next.next) {
3938 if (cachep->nodelists[node]) {
3939 l3 = cachep->nodelists[node];
3944 cachep->nodelists[node] = NULL;
3953 struct kmem_cache *cachep;
3963 old = cpu_cache_get(new->cachep);
3965 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3970 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3990 new->cachep = cachep;
3995 cachep->batchcount = batchcount;
3996 cachep->limit = limit;
3997 cachep->shared = shared;
4003 spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4004 free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
4005 spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4009 return alloc_kmemlist(cachep, gfp);
4013 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
4027 if (cachep->buffer_size > 131072)
4029 else if (cachep->buffer_size > PAGE_SIZE)
4031 else if (cachep->buffer_size > 1024)
4033 else if (cachep->buffer_size > 256)
4048 if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
4059 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
4062 cachep->name, -err);
4071 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
4086 free_block(cachep, ac->entry, tofree, node);
4211 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4225 l3 = cachep->nodelists[node];
4233 if (slabp->inuse != cachep->num && !error)
4235 active_objs += cachep->num;
4239 if (slabp->inuse == cachep->num && !error)
4258 num_objs = num_slabs * cachep->num;
4262 name = cachep->name;
4267 name, active_objs, num_objs, cachep->buffer_size,
4268 cachep->num, (1 << cachep->gfporder));
4270 cachep->limit, cachep->batchcount, cachep->shared);
4275 unsigned long high = cachep->high_mark;
4276 unsigned long allocs = cachep->num_allocations;
4277 unsigned long grown = cachep->grown;
4278 unsigned long reaped = cachep->reaped;
4279 unsigned long errors = cachep->errors;
4280 unsigned long max_freeable = cachep->max_freeable;
4281 unsigned long node_allocs = cachep->node_allocs;
4282 unsigned long node_frees = cachep->node_frees;
4283 unsigned long overflows = cachep->node_overflow;
4293 unsigned long allochit = atomic_read(&cachep->allochit);
4294 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4295 unsigned long freehit = atomic_read(&cachep->freehit);
4296 unsigned long freemiss = atomic_read(&cachep->freemiss);
4340 struct kmem_cache *cachep;
4359 list_for_each_entry(cachep, &cache_chain, next) {
4360 if (!strcmp(cachep->name, kbuf)) {
4365 res = do_tune_cpucache(cachep, limit,
4461 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4469 if (!(cachep->flags & SLAB_STORE_USER))
4471 if (!(cachep->flags & SLAB_RED_ZONE))
4479 l3 = cachep->nodelists[node];
4487 handle_slab(n, cachep, slabp);
4489 handle_slab(n, cachep, slabp);
4492 name = cachep->name;
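
Taken together, the lines above follow cachep from creation (kmem_cache_create at line 2163 onward) through per-CPU array caches, node lists, and debug checks, to shrinking and destruction. As a minimal sketch of how a caller drives those entry points: the module below is hypothetical (struct demo_obj and the "demo_cache" name are invented), and it assumes the signatures exported by the kernel version these line numbers come from, where the constructor takes only the object pointer, matching the cachep->ctor(objp) call at line 2720.

/* Hypothetical demo module; illustrative sketch, not part of slab.c. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_obj {
	int id;
	char payload[56];
};

static struct kmem_cache *demo_cachep;

/* Constructor runs once per object when its slab page is populated. */
static void demo_ctor(void *objp)
{
	memset(objp, 0, sizeof(struct demo_obj));
}

static int __init demo_init(void)
{
	struct demo_obj *obj;

	demo_cachep = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
					0, SLAB_HWCACHE_ALIGN, demo_ctor);
	if (!demo_cachep)
		return -ENOMEM;

	obj = kmem_cache_alloc(demo_cachep, GFP_KERNEL);
	if (!obj) {
		kmem_cache_destroy(demo_cachep);
		return -ENOMEM;
	}
	obj->id = 1;
	kmem_cache_free(demo_cachep, obj);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Everything else in the listing, such as array_cache refills (cache_alloc_refill), alien-cache draining, and the red-zone and poison checks, happens behind these few public calls.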