Lines Matching refs:slab (only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/mm/)

2  * linux/mm/slab.c
25 * page long) and always contiguous), and each slab contains multiple
58 * Several members in struct kmem_cache and struct slab never change, they
61 * and local interrupts are disabled so slab code is preempt-safe.
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
78 * 15 March 2005. NUMA slab allocator.
84 * Modified the slab allocator to be node aware on NUMA systems.
86 * All object allocations for a node occur from node specific slab lists.
89 #include <linux/slab.h>
171 * Bufctl's are used for linking objs within a slab
175 * slab an object belongs to.
177 * the number of objects a slab (not a cache) can contain when off-slab
179 * that does not use off-slab slabs.
182 * to have too many per slab.
194 * struct slab
196 * Manages the objs in a slab. Placed either at the beginning of mem allocated
197 * for a slab, or allocated from a general cache.
200 struct slab {
204 unsigned int inuse; /* num of objs active in slab */
223 * We assume struct slab_rcu can overlay struct slab when destroying.
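
For orientation, a minimal userspace sketch of the on-slab layout these fragments describe: a struct slab header at the start of the slab's memory, a kmem_bufctl_t array immediately after it (returned by slab_bufctl()), and the objects themselves behind that. The names mirror the kernel's, but the fixed sizes and the missing colouring/alignment handling are simplifications, not the kernel code:

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned int kmem_bufctl_t;
    #define BUFCTL_END ((kmem_bufctl_t)(~0U))

    /* Simplified stand-in for the kernel's struct slab. */
    struct slab {
        void         *s_mem;    /* first object in the slab */
        unsigned int  inuse;    /* num of objs active in slab */
        kmem_bufctl_t free;     /* index of the first free object */
    };

    /* On-slab layout: the bufctl array sits right after struct slab. */
    static kmem_bufctl_t *slab_bufctl(struct slab *slabp)
    {
        return (kmem_bufctl_t *)(slabp + 1);
    }

    int main(void)
    {
        size_t obj_size = 64, num = 8;
        size_t mgmt = sizeof(struct slab) + num * sizeof(kmem_bufctl_t);
        char *mem = malloc(mgmt + num * obj_size);   /* stands in for a page */
        struct slab *slabp = (struct slab *)mem;

        slabp->s_mem = mem + mgmt;
        slabp->inuse = 0;
        slabp->free  = 0;
        for (unsigned int i = 0; i < num - 1; i++)
            slab_bufctl(slabp)[i] = i + 1;           /* chain free objects */
        slab_bufctl(slabp)[num - 1] = BUFCTL_END;

        printf("slab header is %zu bytes, first object at offset %zu\n",
               sizeof(struct slab), mgmt);
        free(mem);
        return 0;
    }
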
267 * The slab lists for all objects.
301 * it. Mostly the same as what is in linux/slab.h except it returns an index.
341 #define MAKE_LIST(cachep, listp, slab, nodeid) \
344 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
478 * Do not go above this order unless 0 objects fit into the slab.
485 * Functions for storing/retrieving the cachep and or slab from the page
486 * allocator. These are used to find the slab an obj belongs to. With kfree(),
501 static inline void page_set_slab(struct page *page, struct slab *slab)
503 page->lru.prev = (struct list_head *)slab;
506 static inline struct slab *page_get_slab(struct page *page)
509 return (struct slab *)page->lru.prev;
518 static inline struct slab *virt_to_slab(const void *obj)
524 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
527 return slab->s_mem + cache->buffer_size * idx;
537 const struct slab *slab, void *obj)
539 u32 offset = (obj - slab->s_mem);
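
index_to_obj() and obj_to_index() are plain address arithmetic on s_mem and buffer_size. A small standalone sketch of that arithmetic (the kernel's obj_to_index() uses reciprocal_divide() rather than the ordinary division shown here):

    #include <assert.h>
    #include <stdio.h>

    struct slab {
        void *s_mem;               /* address of the first object */
    };

    struct kmem_cache {
        unsigned int buffer_size;  /* size of one object, including padding */
    };

    /* Object index -> object address, as in index_to_obj(). */
    static void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
                              unsigned int idx)
    {
        return (char *)slab->s_mem + (size_t)cache->buffer_size * idx;
    }

    /* Object address -> object index; the kernel avoids the runtime
     * division with reciprocal_divide(). */
    static unsigned int obj_to_index(struct kmem_cache *cache,
                                     struct slab *slab, void *obj)
    {
        size_t offset = (char *)obj - (char *)slab->s_mem;
        return (unsigned int)(offset / cache->buffer_size);
    }

    int main(void)
    {
        char objects[4][32];
        struct kmem_cache cache = { .buffer_size = 32 };
        struct slab slab = { .s_mem = objects };

        void *third = index_to_obj(&cache, &slab, 2);
        assert(obj_to_index(&cache, &slab, third) == 2);
        printf("object 2 lives at %p\n", third);
        return 0;
    }
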
596 * used by boot code to determine if it can use slab based allocator
606 * Slab sometimes uses the kmalloc slabs to store the slab headers
607 * for other slabs "off slab".
610 * locking we put on-slab caches into a separate lock-class.
713 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
728 * The slab management structure can be either off the slab or
730 * slab is used for:
732 * - The struct slab
737 * If the slab management structure is off the slab, then the
757 nr_objs = (slab_size - sizeof(struct slab)) /
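
Lines 713 and 757 are the two halves of the objects-per-slab estimate: the management area costs ALIGN(sizeof(struct slab) + nr_objs * sizeof(kmem_bufctl_t), align), and nr_objs is first approximated by dividing the remaining space by the per-object cost (in the kernel's cache_estimate() the divisor, not shown above, is buffer_size plus one kmem_bufctl_t). A simplified sketch of that calculation, assuming on-slab management and ignoring the off-slab and colouring refinements:

    #include <stdio.h>

    typedef unsigned int kmem_bufctl_t;
    struct slab { void *s_mem; unsigned int inuse; kmem_bufctl_t free; };

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

    /* Management area: struct slab header plus one bufctl per object. */
    static size_t slab_mgmt_size(size_t nr_objs, size_t align)
    {
        return ALIGN_UP(sizeof(struct slab) + nr_objs * sizeof(kmem_bufctl_t),
                        align);
    }

    /* Estimate how many objects of buffer_size fit in slab_size bytes when
     * the management structure is kept on-slab.  Follows the shape of the
     * kernel's cache_estimate(), without the GFP/off-slab cases. */
    static size_t estimate_nr_objs(size_t slab_size, size_t buffer_size,
                                   size_t align)
    {
        size_t nr_objs = (slab_size - sizeof(struct slab)) /
                         (buffer_size + sizeof(kmem_bufctl_t));

        /* The division may overestimate by one once alignment is applied. */
        if (slab_mgmt_size(nr_objs, align) + nr_objs * buffer_size > slab_size)
            nr_objs--;
        return nr_objs;
    }

    int main(void)
    {
        size_t page = 4096, obj = 256, align = sizeof(void *);
        size_t n = estimate_nr_objs(page, obj, align);
        printf("%zu objects of %zu bytes fit in a %zu-byte slab "
               "(%zu bytes of management overhead)\n",
               n, obj, page, slab_mgmt_size(n, align));
        return 0;
    }
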
782 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
1039 struct slab *slabp = virt_to_slab(objp);
1075 * Allocates and initializes nodelists for a node on each slab cache, used for
1195 * kmalloc_node allows us to add the slab to the right
1325 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1507 sizeof(struct slab), cache_line_size());
1627 /* Annotate slab for lockdep -- annotate the malloc caches */
1678 * Nommu uses slabs for process anonymous memory allocations, and thus
1899 struct slab *slabp = virt_to_slab(objp);
1922 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1951 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1957 * slab_destroy - destroy and release all objects in a slab
1959 * @slabp: slab pointer being destroyed
1961 * Destroy all the objs in a slab, and release the mem back to the system.
1962 * Before calling, the slab must have been unlinked from the cache. The
1965 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
2010 * @flags: slab allocation flags
2012 * Also calculates the number of objects per slab.
2035 * Max number of objs-per-slab for caches which
2036 * use off-slab slabs. Needed to avoid a possible
2039 offslab_limit = size - sizeof(struct slab);
2052 * A VFS-reclaimable slab tends to have most allocations
2148 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2171 printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
2191 * destroy its slab cache and no-one else reuses the vmalloc
2237 * sure any on-slab bufctl's are also correctly aligned.
2334 * Determine if the slab management is 'on' or 'off' slab.
2336 * it too early on. Always use on-slab management when
2342 * Size is large, assume best to place the slab management obj
2343 * off-slab (should allow better packing of objs).
2359 + sizeof(struct slab), align);
2362 * If the slab has been placed off-slab, and we have enough space then
2363 * move it on-slab. This is at the expense of any extra colouring.
2371 /* really off slab. No need for manual alignment */
2373 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2402 * But since we go off slab only for object size greater than
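
The fragments around lines 2334-2402 describe where the slab descriptor lives: large objects push it off-slab for better packing, but if the leftover space in the slab is big enough it is pulled back on-slab at the cost of colouring space. A hedged sketch of that decision; the CFLGS_OFF_SLAB flag word, slab_early_init check and off-slab descriptor cache are omitted, and the PAGE_SIZE >> 3 threshold is the one kmem_cache_create() uses in this kernel version:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int kmem_bufctl_t;
    struct slab { void *s_mem; unsigned int inuse; kmem_bufctl_t free; };

    #define PAGE_SIZE 4096u
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    /* Decide slab-descriptor placement: off-slab for large objects, but
     * move back on-slab when the leftover space (left_over) can hold the
     * management area anyway. */
    static bool choose_off_slab(size_t obj_size, size_t num_objs,
                                size_t left_over, size_t align)
    {
        bool off_slab = (obj_size >= (PAGE_SIZE >> 3));

        size_t slab_size = ALIGN_UP(num_objs * sizeof(kmem_bufctl_t)
                                    + sizeof(struct slab), align);

        if (off_slab && left_over >= slab_size)
            off_slab = false;   /* enough slack: keep management on-slab */

        return off_slab;
    }

    int main(void)
    {
        printf("64-byte objects:   %s\n",
               choose_off_slab(64, 59, 128, 8) ? "off-slab" : "on-slab");
        printf("1024-byte objects: %s\n",
               choose_off_slab(1024, 4, 0, 8) ? "off-slab" : "on-slab");
        return 0;
    }
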
2422 panic("kmem_cache_create(): failed to create slab `%s'\n",
2515 struct slab *slabp;
2527 slabp = list_entry(p, struct slab, list);
2533 * Safe to drop the lock. The slab is no longer linked
2592 * Remove a &struct kmem_cache object from the slab cache.
2633 * Get the memory for a slab management obj.
2634 * For a slab cache when the slab descriptor is off-slab, slab descriptors
2635 * always come from malloc_sizes caches. The slab descriptor cannot
2643 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2647 struct slab *slabp;
2650 /* Slab management obj is off-slab. */
2654 * If the first object in the slab is leaked (it's allocated
2675 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2681 struct slab *slabp)
2737 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2754 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2760 /* Verify that the slab belongs to the intended node */
2764 printk(KERN_ERR "slab: double free detected in cache "
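
slab_get_obj() and slab_put_obj() pop and push object indices on the per-slab bufctl free list: slabp->free names the first free object and slab_bufctl(slabp)[objnr] names the one after it. A standalone sketch of that discipline, including a crude version of the double-free scan suggested by line 2764 (the kernel's node verification, debug poisoning and locking are left out, and the bufctl array is stored inline for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned int kmem_bufctl_t;
    #define BUFCTL_END ((kmem_bufctl_t)(~0U))
    #define NUM_OBJS   4

    struct slab {
        unsigned int  inuse;              /* objects currently allocated */
        kmem_bufctl_t free;               /* index of next free object   */
        kmem_bufctl_t bufctl[NUM_OBJS];   /* bufctl[i] = next free index */
    };

    static void slab_init(struct slab *slabp)
    {
        for (unsigned int i = 0; i < NUM_OBJS - 1; i++)
            slabp->bufctl[i] = i + 1;
        slabp->bufctl[NUM_OBJS - 1] = BUFCTL_END;
        slabp->free  = 0;
        slabp->inuse = 0;
    }

    /* Pop the first free object index, as slab_get_obj() does. */
    static kmem_bufctl_t slab_get_obj(struct slab *slabp)
    {
        kmem_bufctl_t objnr = slabp->free;

        if (objnr == BUFCTL_END)
            return BUFCTL_END;            /* slab is full */
        slabp->free = slabp->bufctl[objnr];
        slabp->inuse++;
        return objnr;
    }

    /* Push an object index back, with a crude double-free scan. */
    static void slab_put_obj(struct slab *slabp, kmem_bufctl_t objnr)
    {
        for (kmem_bufctl_t i = slabp->free; i != BUFCTL_END;
             i = slabp->bufctl[i]) {
            if (i == objnr) {
                fprintf(stderr, "slab: double free detected (obj %u)\n",
                        objnr);
                abort();
            }
        }
        slabp->bufctl[objnr] = slabp->free;
        slabp->free = objnr;
        slabp->inuse--;
    }

    int main(void)
    {
        struct slab s;
        slab_init(&s);

        kmem_bufctl_t a = slab_get_obj(&s);
        kmem_bufctl_t b = slab_get_obj(&s);
        slab_put_obj(&s, a);
        printf("allocated %u and %u, freed %u, %u still in use\n",
               a, b, a, s.inuse);
        return 0;
    }
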
2775 * Map pages beginning at addr to the given cache and slab. This is required
2776 * for the slab allocator to be able to lookup the cache and slab of a
2777 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2779 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2793 page_set_slab(page, slab);
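
slab_map_pages() is what lets kfree() start from a bare pointer: every page backing a slab is tagged with its owning slab via page->lru.prev (as page_set_slab() near line 501 shows) and, in the kernel, with its cache via page->lru.next. A userspace mock of that bookkeeping, with a tiny fake mem_map standing in for the kernel's page array:

    #include <stdio.h>

    struct list_head  { void *next, *prev; };
    struct page       { struct list_head lru; };
    struct kmem_cache { const char *name; };
    struct slab       { void *s_mem; };

    #define PAGE_SIZE  4096u
    #define NR_PAGES   4

    static struct page pages[NR_PAGES];            /* pretend mem_map */
    static char memory[NR_PAGES * PAGE_SIZE];

    static struct page *virt_to_page(void *addr)
    {
        return &pages[((char *)addr - memory) / PAGE_SIZE];
    }

    static void page_set_cache(struct page *page, struct kmem_cache *cache)
    {
        page->lru.next = cache;
    }

    static void page_set_slab(struct page *page, struct slab *slab)
    {
        page->lru.prev = slab;
    }

    /* Tag every page of a slab with its owning cache and slab descriptor,
     * like slab_map_pages(); a free path can then recover both from any
     * object address inside the slab. */
    static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
                               void *addr, unsigned int nr_pages)
    {
        for (unsigned int i = 0; i < nr_pages; i++) {
            struct page *page = virt_to_page((char *)addr + i * PAGE_SIZE);
            page_set_cache(page, cache);
            page_set_slab(page, slab);
        }
    }

    int main(void)
    {
        struct kmem_cache cache = { .name = "demo_cache" };
        struct slab slab = { .s_mem = memory };

        slab_map_pages(&cache, &slab, memory, 2);

        void *obj = memory + 100;                  /* some object address */
        struct page *pg = virt_to_page(obj);
        printf("object %p belongs to cache '%s'\n",
               obj, ((struct kmem_cache *)pg->lru.next)->name);
        return 0;
    }
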
2805 struct slab *slabp;
2822 /* Get colour for the slab, and calc the next value. */
2851 /* Get slab management. */
2866 /* Make slab active. */
2923 struct slab *slabp;
2965 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2970 /* Check slab's freelist to see if this obj is there. */
2978 printk(KERN_ERR "slab: Internal list corruption detected in "
3031 struct slab *slabp;
3032 /* Get the slab the alloc is to come from. */
3041 slabp = list_entry(entry, struct slab, list);
3046 * The slab was either on partial or free list so
3136 struct slab *slabp;
3229 * slab into the proper nodelist and then allocate from it.
3290 * objects in the slab since we are
3305 * An interface to enable slab creation on nodeid
3311 struct slab *slabp;
3330 slabp = list_entry(entry, struct slab, list);
3507 struct slab *slabp;
3519 /* fixup slab chains */
3524 * lock here, even if we have an off-slab slab
3534 /* Unconditionally move a slab to the end of the
3578 struct slab *slabp;
3580 slabp = list_entry(p, struct slab, list);
3657 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
3663 * part of the slab cache in question, but it at least
4022 * - reduce the number of linked list operations on the slab and
4212 struct slab *slabp;
4264 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4316 * num-pages-per-slab
4329 * slabinfo_write - Tuning for the slab allocator
4429 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4462 struct slab *slabp;