Lines Matching defs:slab in /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/mm/slab.c

2  * linux/mm/slab.c
25 * page long) and always contiguous), and each slab contains multiple
58 * Several members in struct kmem_cache and struct slab never change, they
61 * and local interrupts are disabled so slab code is preempt-safe.
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
78 * 15 March 2005. NUMA slab allocator.
84 * Modified the slab allocator to be node aware on NUMA systems.
86 * All object allocations for a node occur from node specific slab lists.
89 #include <linux/slab.h>
192 * Bufctls are used for linking objs within a slab
196 * slab an object belongs to.
198 * the number of objects a slab (not a cache) can contain when off-slab
200 * that does not use off-slab slabs.
203 * to have too many per slab.
215 * struct slab
217 * Manages the objs in a slab. Placed either at the beginning of mem allocated
218 * for a slab, or allocated from a general cache.
221 struct slab {
225 unsigned int inuse; /* num of objs active in slab */
244 * We assume struct slab_rcu can overlay struct slab when destroying.
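
Taken together, the matches around lines 192-244 describe the per-slab descriptor and its RCU overlay. A simplified sketch of those 2.6-era structures, reconstructed from the matches above and mainline slab.c (field order in this tree may differ slightly):

typedef unsigned int kmem_bufctl_t;

struct slab {
	struct list_head list;		/* linkage into the full/partial/free lists */
	unsigned long colouroff;	/* colour offset of s_mem within the pages */
	void *s_mem;			/* first object, including colour offset */
	unsigned int inuse;		/* num of objs active in slab */
	kmem_bufctl_t free;		/* index of the first free object */
	unsigned short nodeid;		/* NUMA node the slab memory came from */
};

struct slab_rcu {			/* overlays struct slab for RCU destruction */
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};
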
289 * The slab lists for all objects.
323 * it. Mostly the same as what is in linux/slab.h except it returns an index.
363 #define MAKE_LIST(cachep, listp, slab, nodeid) \
366 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
395 unsigned int num; /* # of objs per slab */
398 /* order of pgs per slab (2^n) */
573 * Do not go above this order unless 0 objects fit into the slab.
580 * Functions for storing/retrieving the cachep and/or slab from the page
581 * allocator. These are used to find the slab an obj belongs to. With kfree(),
596 static inline void page_set_slab(struct page *page, struct slab *slab)
598 page->lru.prev = (struct list_head *)slab;
601 static inline struct slab *page_get_slab(struct page *page)
604 return (struct slab *)page->lru.prev;
613 static inline struct slab *virt_to_slab(const void *obj)
619 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
622 return slab->s_mem + cache->buffer_size * idx;
632 const struct slab *slab, void *obj)
634 u32 offset = (obj - slab->s_mem);
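
The helpers at lines 596-634 map between pages, slabs, and object indices. A minimal sketch of the two index helpers; the real obj_to_index() likely divides with reciprocal_divide() rather than the plain division used here:

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
{
	return slab->s_mem + cache->buffer_size * idx;
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return offset / cache->buffer_size;	/* mainline: reciprocal_divide() */
}
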
681 * Slab sometimes uses the kmalloc slabs to store the slab headers
682 * for other slabs "off slab".
685 * locking we put on-slab caches into a separate lock-class.
745 * used by boot code to determine if it can use slab based allocator
793 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
808 * The slab management structure can be either off the slab or
810 * slab is used for:
812 * - The struct slab
817 * If the slab management structure is off the slab, then the
837 nr_objs = (slab_size - sizeof(struct slab)) /
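
Lines 793-837 are the sizing math: on-slab, every object costs buffer_size plus one kmem_bufctl_t, and the aligned struct slab header is carved out of the same pages. A simplified sketch of cache_estimate() for the on-slab case only (the real function also handles off-slab caches and caps the object count):

static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, unsigned int *num, size_t *left_over)
{
	size_t slab_size = PAGE_SIZE << gfporder;
	size_t mgmt_size;
	unsigned int nr_objs;

	/* each object needs buffer_size bytes plus one bufctl entry */
	nr_objs = (slab_size - sizeof(struct slab)) /
		  (buffer_size + sizeof(kmem_bufctl_t));

	/* aligned management area: struct slab + bufctl array (line 793) */
	mgmt_size = ALIGN(sizeof(struct slab) +
			  nr_objs * sizeof(kmem_bufctl_t), align);

	/* alignment padding may cost the last object */
	if (mgmt_size + nr_objs * buffer_size > slab_size) {
		nr_objs--;
		mgmt_size = ALIGN(sizeof(struct slab) +
				  nr_objs * sizeof(kmem_bufctl_t), align);
	}

	*num = nr_objs;
	*left_over = slab_size - mgmt_size - nr_objs * buffer_size;
}
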
862 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
1114 struct slab *slabp = virt_to_slab(objp);
1167 * kmalloc_node allows us to add the slab to the right
1461 sizeof(struct slab), cache_line_size());
1579 /* Annotate slab for lockdep -- annotate the malloc caches */
1626 * Nommu uses slabs for process anonymous memory allocations, and thus
1833 struct slab *slabp = virt_to_slab(objp);
1857 * slab_destroy_objs - destroy a slab and its objects
1859 * @slabp: slab pointer being destroyed
1861 * Call the registered destructor for each object in a slab that is being
1864 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1893 static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1899 * slab_destroy - destroy and release all objects in a slab
1901 * @slabp: slab pointer being destroyed
1903 * Destroy all the objs in a slab, and release the mem back to the system.
1904 * Before calling, the slab must have been unlinked from the cache. The
1907 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
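
slab_destroy() at line 1907 hands the pages back and, for off-slab caches, frees the descriptor separately. A sketch of the non-RCU path, assuming this file's kmem_freepages() and OFF_SLAB() helpers:

static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
	void *addr = slabp->s_mem - slabp->colouroff;

	slab_destroy_objs(cachep, slabp);
	/* SLAB_DESTROY_BY_RCU caches instead defer the free via call_rcu() */
	kmem_freepages(cachep, addr);
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slabp);
}
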
1968 * @flags: slab allocation flags
1970 * Also calculates the number of objects per slab.
1993 * Max number of objs-per-slab for caches which
1994 * use off-slab slabs. Needed to avoid a possible
1997 offslab_limit = size - sizeof(struct slab);
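
The offslab_limit computation at line 1997 (inside calculate_slab_order()) caps how many objects an off-slab cache may put in one slab, so that the descriptor's bufctl array still fits and cache_grow() cannot loop. Roughly:

		if (flags & CFLGS_OFF_SLAB) {
			/* descriptor = struct slab + one bufctl per object */
			offslab_limit = size - sizeof(struct slab);
			offslab_limit /= sizeof(kmem_bufctl_t);

			if (num > offslab_limit)
				break;	/* this order packs too many objects */
		}
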
2010 * A VFS-reclaimable slab tends to have most allocations
2106 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2130 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
2147 * destroy its slab cache and no-one else reuses the vmalloc
2193 * sure any on-slab bufctls are also correctly aligned.
2285 * Determine if the slab management is 'on' or 'off' slab.
2291 * Size is large, assume best to place the slab management obj
2292 * off-slab (should allow better packing of objs).
2308 + sizeof(struct slab), align);
2311 * If the slab has been placed off-slab, and we have enough space then
2312 * move it on-slab. This is at the expense of any extra colouring.
2320 /* really off slab. No need for manual alignment */
2322 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2342 * But since we go off slab only for object size greater than
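
Lines 2285-2342 are kmem_cache_create() deciding where the descriptor lives. A condensed sketch of that logic; the PAGE_SIZE>>3 threshold and CFLGS_OFF_SLAB flag are mainline 2.6 names and are assumed to match this tree (the early-boot exception is omitted):

	/* large objects: keep the descriptor off-slab so objects pack better */
	if (size >= (PAGE_SIZE >> 3))
		flags |= CFLGS_OFF_SLAB;

	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
			  + sizeof(struct slab), align);

	/* enough leftover space? pull the descriptor back on-slab */
	if ((flags & CFLGS_OFF_SLAB) && left_over >= slab_size) {
		flags &= ~CFLGS_OFF_SLAB;
		left_over -= slab_size;
	}

	if (flags & CFLGS_OFF_SLAB) {
		/* really off slab. No need for manual alignment */
		slab_size = cachep->num * sizeof(kmem_bufctl_t)
			    + sizeof(struct slab);
	}
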
2362 panic("kmem_cache_create(): failed to create slab `%s'\n",
2452 struct slab *slabp;
2464 slabp = list_entry(p, struct slab, list);
2470 * Safe to drop the lock. The slab is no longer linked
2527 * Remove a &struct kmem_cache object from the slab cache.
2565 * Get the memory for a slab management obj.
2566 * For a slab cache when the slab descriptor is off-slab, slab descriptors
2567 * always come from malloc_sizes caches. The slab descriptor cannot
2575 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2579 struct slab *slabp;
2582 /* Slab management obj is off-slab. */
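
alloc_slabmgmt() at line 2575 obtains the descriptor either from the separate slabp_cache (off-slab) or from the first bytes of the slab's own memory (on-slab). A sketch of the 2.6-era logic with the debug initialisation left out:

static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
				   int colour_off, gfp_t local_flags, int nodeid)
{
	struct slab *slabp;

	if (OFF_SLAB(cachep)) {
		/* Slab management obj is off-slab. */
		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
					      local_flags, nodeid);
		if (!slabp)
			return NULL;
	} else {
		/* carve the descriptor out of the slab's first bytes */
		slabp = objp + colour_off;
		colour_off += cachep->slab_size;
	}
	slabp->inuse = 0;
	slabp->colouroff = colour_off;
	slabp->s_mem = objp + colour_off;
	slabp->nodeid = nodeid;
	return slabp;
}
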
2598 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2604 struct slab *slabp)
2662 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2679 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2685 /* Verify that the slab belongs to the intended node */
2689 printk(KERN_ERR "slab: double free detected in cache "
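
slab_bufctl() at line 2598 returns the kmem_bufctl_t array that sits right after the struct slab and implements the per-slab free list. A sketch of the fast paths behind lines 2662-2689, with the DEBUG checks (including the double-free report) omitted:

static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
	return (kmem_bufctl_t *)(slabp + 1);
}

static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
			  int nodeid)
{
	void *objp = index_to_obj(cachep, slabp, slabp->free);

	slabp->inuse++;
	slabp->free = slab_bufctl(slabp)[slabp->free];	/* pop the free list */
	return objp;
}

static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
			 void *objp, int nodeid)
{
	unsigned int objnr = obj_to_index(cachep, slabp, objp);

	slab_bufctl(slabp)[objnr] = slabp->free;	/* push onto the free list */
	slabp->free = objnr;
	slabp->inuse--;
}
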
2700 * Map pages beginning at addr to the given cache and slab. This is required
2701 * for the slab allocator to be able to lookup the cache and slab of a
2702 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2704 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2718 page_set_slab(page, slab);
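
slab_map_pages() at line 2704 records, in every struct page backing the slab, which cache and which slab descriptor own it, so kfree() can get from a raw pointer back to its slab. A sketch of the loop, assuming the page_set_cache() counterpart to page_set_slab() shown above:

static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
			   void *addr)
{
	int nr_pages = 1;
	struct page *page = virt_to_page(addr);

	if (likely(!PageCompound(page)))
		nr_pages <<= cache->gfporder;

	do {
		page_set_cache(page, cache);	/* stashed in page->lru.next */
		page_set_slab(page, slab);	/* stashed in page->lru.prev */
		page++;
	} while (--nr_pages);
}
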
2730 struct slab *slabp;
2747 /* Get colour for the slab, and calculate the next value. */
2776 /* Get slab management. */
2792 /* Make slab active. */
2849 struct slab *slabp;
2889 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2894 /* Check slab's freelist to see if this obj is there. */
2902 printk(KERN_ERR "slab: Internal list corruption detected in "
2954 struct slab *slabp;
2955 /* Get the slab the allocation is to come from. */
2964 slabp = list_entry(entry, struct slab, list);
2969 * The slab was either on partial or free list so
3059 struct slab *slabp;
3203 * slab into the proper nodelist and then allocate from it.
3259 * objects in the slab since we are
3273 * An interface to enable slab creation on nodeid
3279 struct slab *slabp;
3298 slabp = list_entry(entry, struct slab, list);
3450 struct slab *slabp;
3462 /* fixup slab chains */
3467 * lock here, even if we have a off-slab slab
3477 /* Unconditionally move a slab to the end of the
3521 struct slab *slabp;
3523 slabp = list_entry(p, struct slab, list);
3595 * be a slab entry.
3601 * part of the slab cache in question, but it at least
3983 * - reduce the number of linked list operations on the slab and
4183 struct slab *slabp;
4235 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4286 * num-pages-per-slab
4299 * slabinfo_write - Tuning for the slab allocator
4394 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4427 struct slab *slabp;