/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
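
/*
 * Typical use of this allocator, as a rough sketch only: the functions named
 * below are the exported API of this file, while "struct foo" and "foo_cache"
 * are made up for illustration.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	...
 *	struct foo *p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 */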

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab, via linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 * [0] is for gcc 2.95. It should really be [].
			 */
};
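
/*
 * Rough sketch of how the array above is used (it follows the avail/entry
 * convention of the alloc/free fast paths further down, not verbatim code):
 *
 *	free:	ac->entry[ac->avail++] = objp;	push on top
 *	alloc:	objp = ac->entry[--ac->avail];	pop the most recently freed,
 *						and therefore cache-warm, object
 */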

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(struct work_struct *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it.  Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <= x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
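
/*
 * CFLGS_OFF_SLAB marks caches that keep their slab management data (the
 * struct slab plus the per-object bufctl array) outside the slab's own
 * pages, in a separate general cache pointed to by slabp_cache.  This is
 * used for larger objects, where on-slab management data would waste space.
 */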

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 *		the end of an object is aligned with the end of the real
 *		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 *		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
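 *
 * Taken together (rough illustration only; exact sizes depend on the arch
 * and on which debug flags are set): with SLAB_RED_ZONE and SLAB_STORE_USER
 * both enabled, an object carries one red-zone word in front of it and a
 * red-zone word plus the caller address behind it, so buffer_size exceeds
 * obj_size by at least two unsigned long longs, one pointer, and any
 * alignment padding.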
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(struct kmem_cache *cachep)
{
	return cachep->obj_size;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->buffer_size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->buffer_size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/*
 * Functions for storing/retrieving the cachep and/or slab from the page
 * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
 * these are used to find the cache which an obj belongs to.
 */
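
/*
 * Sketch of the round trip these helpers implement: the setters run when a
 * new slab's pages are mapped, the getters run on the free paths.
 *
 *	page_set_cache(page, cachep);	page->lru.next <- cachep
 *	page_set_slab(page, slabp);	page->lru.prev <- slabp
 *	...
 *	cachep = virt_to_cache(objp);	via virt_to_head_page(objp)
 *	slabp  = virt_to_slab(objp);
 */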
583 */ 584static inline void page_set_cache(struct page *page, struct kmem_cache *cache) 585{ 586 page->lru.next = (struct list_head *)cache; 587} 588 589static inline struct kmem_cache *page_get_cache(struct page *page) 590{ 591 page = compound_head(page); 592 BUG_ON(!PageSlab(page)); 593 return (struct kmem_cache *)page->lru.next; 594} 595 596static inline void page_set_slab(struct page *page, struct slab *slab) 597{ 598 page->lru.prev = (struct list_head *)slab; 599} 600 601static inline struct slab *page_get_slab(struct page *page) 602{ 603 BUG_ON(!PageSlab(page)); 604 return (struct slab *)page->lru.prev; 605} 606 607static inline struct kmem_cache *virt_to_cache(const void *obj) 608{ 609 struct page *page = virt_to_head_page(obj); 610 return page_get_cache(page); 611} 612 613static inline struct slab *virt_to_slab(const void *obj) 614{ 615 struct page *page = virt_to_head_page(obj); 616 return page_get_slab(page); 617} 618 619static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab, 620 unsigned int idx) 621{ 622 return slab->s_mem + cache->buffer_size * idx; 623} 624 625/* 626 * We want to avoid an expensive divide : (offset / cache->buffer_size) 627 * Using the fact that buffer_size is a constant for a particular cache, 628 * we can replace (offset / cache->buffer_size) by 629 * reciprocal_divide(offset, cache->reciprocal_buffer_size) 630 */ 631static inline unsigned int obj_to_index(const struct kmem_cache *cache, 632 const struct slab *slab, void *obj) 633{ 634 u32 offset = (obj - slab->s_mem); 635 return reciprocal_divide(offset, cache->reciprocal_buffer_size); 636} 637 638/* 639 * These are the default caches for kmalloc. Custom caches can have other sizes. 640 */ 641struct cache_sizes malloc_sizes[] = { 642#define CACHE(x) { .cs_size = (x) }, 643#include <linux/kmalloc_sizes.h> 644 CACHE(ULONG_MAX) 645#undef CACHE 646}; 647EXPORT_SYMBOL(malloc_sizes); 648 649/* Must match cache_sizes above. Out of line to keep cache footprint low. */ 650struct cache_names { 651 char *name; 652 char *name_dma; 653}; 654 655static struct cache_names __initdata cache_names[] = { 656#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, 657#include <linux/kmalloc_sizes.h> 658 {NULL,} 659#undef CACHE 660}; 661 662static struct arraycache_init initarray_cache __initdata = 663 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 664static struct arraycache_init initarray_generic = 665 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; 666 667/* internal cache of cache description objs */ 668static struct kmem_cache cache_cache = { 669 .batchcount = 1, 670 .limit = BOOT_CPUCACHE_ENTRIES, 671 .shared = 1, 672 .buffer_size = sizeof(struct kmem_cache), 673 .name = "kmem_cache", 674}; 675 676#define BAD_ALIEN_MAGIC 0x01020304ul 677 678#ifdef CONFIG_LOCKDEP 679 680/* 681 * Slab sometimes uses the kmalloc slabs to store the slab headers 682 * for other slabs "off slab". 683 * The locking for this is tricky in that it nests within the locks 684 * of all other slabs in a few places; to deal with this special 685 * locking we put on-slab caches into a separate lock-class. 686 * 687 * We set lock class for alien array caches which are up during init. 
 * The lock annotation will be lost if all cpus of a node go down and
 * then come back up during hotplug.
 */
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;

static inline void init_lock_keys(void)
{
	int q;
	struct cache_sizes *s = malloc_sizes;

	while (s->cs_size != ULONG_MAX) {
		for_each_node(q) {
			struct array_cache **alc;
			int r;
			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
			if (!l3 || OFF_SLAB(s->cs_cachep))
				continue;
			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
			alc = l3->alien;
			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
				continue;
			for_each_node(r) {
				if (alc[r])
					lockdep_set_class(&alc[r]->lock,
							  &on_slab_alc_key);
			}
		}
		s++;
	}
}
#else
static inline void init_lock_keys(void)
{
}
#endif

/*
 * 1. Guard access to the cache-chain.
 * 2. Protect sanity of cpu_online_map against cpu hotplug events
 */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

/*
 * used by boot code to determine if it can use slab based allocator
 */
int slab_is_available(void)
{
	return g_cpucache_up == FULL;
}

static DEFINE_PER_CPU(struct delayed_work, reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline struct kmem_cache *__find_general_cachep(size_t size,
							gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
#ifdef CONFIG_ZONE_DMA
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
#endif
	return csizep->cs_cachep;
}

static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
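
/*
 * For example, a kmalloc(100, GFP_KERNEL) walks malloc_sizes[] until it finds
 * the first general cache with cs_size >= 100 (the "size-128" cache on a
 * default configuration) and allocates from that.
 */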

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_node(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __get_cpu_var(reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(reap_node) = node;
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min(min(from->avail, max), to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	to->touched = 1;
	return nr;
}

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)

static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
	return (struct array_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;
	for_each_node(i)
		kfree(ac_ptr[i]);
	kfree(ac_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (rl3->shared)
			transfer_objects(rl3->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
			__drain_alien_cache(cachep, ac, node);
			spin_unlock_irq(&ac->lock);
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache **alien)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	struct slab *slabp = virt_to_slab(objp);
	int nodeid = slabp->nodeid;
	struct kmem_list3 *l3;
	struct array_cache *alien = NULL;
	int node;

	node = numa_node_id();

	/*
	 * Make sure we are not freeing an object from another node to the
	 * array cache on this cpu.
	 */
	if (likely(slabp->nodeid == node))
		return 0;

	l3 = cachep->nodelists[node];
	STATS_INC_NODEFREES(cachep);
	if (l3->alien && l3->alien[nodeid]) {
		alien = l3->alien[nodeid];
		spin_lock(&alien->lock);
		if (unlikely(alien->avail == alien->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, alien, nodeid);
		}
		alien->entry[alien->avail++] = objp;
		spin_unlock(&alien->lock);
	} else {
		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
		free_block(cachep, &objp, 1, nodeid);
		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
	}
	return 1;
}
#endif

static int __cpuinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	int memsize = sizeof(struct kmem_list3);

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&cache_chain_mutex);
		break;
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/*
		 * We need to do this right in the beginning since
		 * alloc_arraycache's are going to use this list.
		 * kmalloc_node allows us to add the slab to the right
		 * kmem_list3 and not this cpu's kmem_list3
		 */

		list_for_each_entry(cachep, &cache_chain, next) {
			/*
			 * Set up the size64 kmemlist for cpu before we can
			 * begin anything. Make sure some other cpu on this
			 * node has not already allocated this
			 */
			if (!cachep->nodelists[node]) {
				l3 = kmalloc_node(memsize, GFP_KERNEL, node);
				if (!l3)
					goto bad;
				kmem_list3_init(l3);
				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

				/*
				 * The l3s don't come and go as CPUs come and
				 * go.  cache_chain_mutex is sufficient
				 * protection here.
				 */
				cachep->nodelists[node] = l3;
			}

			spin_lock_irq(&cachep->nodelists[node]->list_lock);
			cachep->nodelists[node]->free_limit =
				(1 + nr_cpus_node(node)) *
				cachep->batchcount + cachep->num;
			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
		}

		/*
		 * Now we can go ahead with allocating the shared arrays and
		 * array caches
		 */
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			struct array_cache *shared = NULL;
			struct array_cache **alien = NULL;

			nc = alloc_arraycache(node, cachep->limit,
						cachep->batchcount);
			if (!nc)
				goto bad;
			if (cachep->shared) {
				shared = alloc_arraycache(node,
					cachep->shared * cachep->batchcount,
					0xbaadf00d);
				if (!shared)
					goto bad;
			}
			if (use_alien_caches) {
				alien = alloc_alien_cache(node, cachep->limit);
				if (!alien)
					goto bad;
			}
			cachep->array[cpu] = nc;
			l3 = cachep->nodelists[node];
			BUG_ON(!l3);

			spin_lock_irq(&l3->list_lock);
			if (!l3->shared) {
				/*
				 * We are serialised from CPU_DEAD or
				 * CPU_UP_CANCELLED by the cpucontrol lock
				 */
				l3->shared = shared;
				shared = NULL;
			}
#ifdef CONFIG_NUMA
			if (!l3->alien) {
				l3->alien = alien;
				alien = NULL;
			}
#endif
			spin_unlock_irq(&l3->list_lock);
			kfree(shared);
			free_alien_cache(alien);
		}
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Shutdown cache reaper. Note that the cache_chain_mutex is
		 * held so that if cache_reap() is invoked it cannot do
		 * anything expensive but will only modify reap_work
		 * and reschedule the timer.
		 */
		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
		/* Now the cache_reaper is guaranteed to be not running. */
		per_cpu(reap_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Even if all the cpus of a node are down, we don't free the
		 * kmem_list3 of any cache. This is to avoid a race between
		 * cpu_down, and a kmalloc allocation from another cpu for
		 * memory from the node of the cpu going down.  The list3
		 * structure is usually allocated from kmem_cache_create() and
		 * gets destroyed at kmem_cache_destroy().
		 */
		/* fall thru */
#endif
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			struct array_cache *shared;
			struct array_cache **alien;
			cpumask_t mask;

			mask = node_to_cpumask(node);
			/* cpu is dead; no one can alloc from it. */
			nc = cachep->array[cpu];
			cachep->array[cpu] = NULL;
			l3 = cachep->nodelists[node];

			if (!l3)
				goto free_array_cache;

			spin_lock_irq(&l3->list_lock);

			/* Free limit for this kmem_list3 */
			l3->free_limit -= cachep->batchcount;
			if (nc)
				free_block(cachep, nc->entry, nc->avail, node);

			if (!cpus_empty(mask)) {
				spin_unlock_irq(&l3->list_lock);
				goto free_array_cache;
			}

			shared = l3->shared;
			if (shared) {
				free_block(cachep, shared->entry,
					   shared->avail, node);
				l3->shared = NULL;
			}

			alien = l3->alien;
			l3->alien = NULL;

			spin_unlock_irq(&l3->list_lock);

			kfree(shared);
			if (alien) {
				drain_alien_cache(cachep, alien);
				free_alien_cache(alien);
			}
free_array_cache:
			kfree(nc);
		}
		/*
		 * In the previous loop, all the objects were freed to
		 * the respective cache's slabs, now we can go ahead and
		 * shrink each nodelist to its limit.
		 */
		list_for_each_entry(cachep, &cache_chain, next) {
			l3 = cachep->nodelists[node];
			if (!l3)
				continue;
			drain_freelist(cachep, l3, l3->free_objects);
		}
		break;
	case CPU_LOCK_RELEASE:
		mutex_unlock(&cache_chain_mutex);
		break;
	}
	return NOTIFY_OK;
bad:
	return NOTIFY_BAD;
}

static struct notifier_block __cpuinitdata cpucache_notifier = {
	&cpuup_callback, NULL, 0
};

/*
 * swap the static kmem_list3 with kmalloced memory
 */
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
			int nodeid)
{
	struct kmem_list3 *ptr;

	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
	BUG_ON(!ptr);

	local_irq_disable();
	memcpy(ptr, list, sizeof(struct kmem_list3));
	/*
	 * Do not assume that spinlocks can be initialized via memcpy:
	 */
	spin_lock_init(&ptr->list_lock);

	MAKE_ALL_LISTS(cachep, ptr, nodeid);
	cachep->nodelists[nodeid] = ptr;
	local_irq_enable();
}

/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;
	int order;
	int node;

	if (num_possible_nodes() == 1)
		use_alien_caches = 0;

	for (i = 0; i < NUM_INIT_LISTS; i++) {
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;
	}

	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 */
	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;

	/* Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the cache_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except cache_cache itself:
	 *    cache_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.
	 *    The struct kmem_cache for the new cache is allocated normally.
	 *    An __init data area is used for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for cache_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_list3 for cache_cache and
	 *    the other caches with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */

	node = numa_node_id();

	/* 1) create the cache_cache */
	INIT_LIST_HEAD(&cache_chain);
	list_add(&cache_cache.next, &cache_chain);
	cache_cache.colour_off = cache_line_size();
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];

	/*
	 * struct kmem_cache size depends on nr_node_ids, which
	 * can be less than MAX_NUMNODES.
	 */
	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
				 nr_node_ids * sizeof(struct kmem_list3 *);
#if DEBUG
	cache_cache.obj_size = cache_cache.buffer_size;
#endif
	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
					cache_line_size());
	cache_cache.reciprocal_buffer_size =
		reciprocal_value(cache_cache.buffer_size);

	for (order = 0; order < MAX_ORDER; order++) {
		cache_estimate(order, cache_cache.buffer_size,
			cache_line_size(), 0, &left_over, &cache_cache.num);
		if (cache_cache.num)
			break;
	}
	BUG_ON(!cache_cache.num);
	cache_cache.gfporder = order;
	cache_cache.colour = left_over / cache_cache.colour_off;
	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
				      sizeof(struct slab), cache_line_size());

	/* 2+3) create the kmalloc caches */
	sizes = malloc_sizes;
	names = cache_names;

	/*
	 * Initialize the caches that provide memory for the array cache and the
	 * kmem_list3 structures first.  Without this, further allocations will
	 * bug.
	 */

	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
					sizes[INDEX_AC].cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL, NULL);

	if (INDEX_AC != INDEX_L3) {
		sizes[INDEX_L3].cs_cachep =
			kmem_cache_create(names[INDEX_L3].name,
				sizes[INDEX_L3].cs_size,
				ARCH_KMALLOC_MINALIGN,
				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
				NULL, NULL);
	}

	slab_early_init = 0;

	while (sizes->cs_size != ULONG_MAX) {
		/*
		 * For performance, all the general caches are L1 aligned.
		 * This should be particularly beneficial on SMP boxes, as it
		 * eliminates "false sharing".
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches.
		 */
		if (!sizes->cs_cachep) {
			sizes->cs_cachep = kmem_cache_create(names->name,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL, NULL);
		}
#ifdef CONFIG_ZONE_DMA
		sizes->cs_dmacachep = kmem_cache_create(
					names->name_dma,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
						SLAB_PANIC,
					NULL, NULL);
#endif
		sizes++;
		names++;
	}
	/* 4) Replace the bootstrap head arrays */
	{
		struct array_cache *ptr;

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

		local_irq_disable();
		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
		memcpy(ptr, cpu_cache_get(&cache_cache),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		cache_cache.array[smp_processor_id()] = ptr;
		local_irq_enable();

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);

		local_irq_disable();
		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
		       != &initarray_generic.cache);
		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
		    ptr;
		local_irq_enable();
	}
	/* 5) Replace the bootstrap kmem_list3's */
	{
		int nid;

		/* Replace the static kmem_list3 structures for the boot cpu */
		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);

		for_each_online_node(nid) {
			init_list(malloc_sizes[INDEX_AC].cs_cachep,
				  &initkmem_list3[SIZE_AC + nid], nid);

			if (INDEX_AC != INDEX_L3) {
				init_list(malloc_sizes[INDEX_L3].cs_cachep,
					  &initkmem_list3[SIZE_L3 + nid], nid);
			}
		}
	}

	/* 6) resize the head arrays to their final sizes */
	{
		struct kmem_cache *cachep;
		mutex_lock(&cache_chain_mutex);
		list_for_each_entry(cachep, &cache_chain, next)
			if (enable_cpucache(cachep))
				BUG();
		mutex_unlock(&cache_chain_mutex);
	}

	/* Annotate slab for lockdep -- annotate the malloc caches */
	init_lock_keys();


	/* Done! */
	g_cpucache_up = FULL;

	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
	 */
	register_cpu_notifier(&cpucache_notifier);

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}

static int __init cpucache_init(void)
{
	int cpu;

	/*
	 * Register the timers that return unneeded pages to the page allocator
	 */
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
	return 0;
}
__initcall(cpucache_init);

/*
 * Interface to system's page allocator. No need to hold the cache-lock.
 *
 * If we requested dmaable memory, we will get it. Even if we
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
	struct page *page;
	int nr_pages;
	int i;

#ifndef CONFIG_MMU
	/*
	 * Nommu uses slabs for process anonymous memory allocations, and thus
	 * requires __GFP_COMP to properly refcount higher order allocations
	 */
	flags |= __GFP_COMP;
#endif

	flags |= cachep->gfpflags;

	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
	if (!page)
		return NULL;

	nr_pages = (1 << cachep->gfporder);
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		add_zone_page_state(page_zone(page),
			NR_SLAB_RECLAIMABLE, nr_pages);
	else
		add_zone_page_state(page_zone(page),
			NR_SLAB_UNRECLAIMABLE, nr_pages);
	for (i = 0; i < nr_pages; i++)
		__SetPageSlab(page + i);
	return page_address(page);
}

/*
 * Interface to system's page release.
 */
static void kmem_freepages(struct kmem_cache *cachep, void *addr)
{
	unsigned long i = (1 << cachep->gfporder);
	struct page *page = virt_to_page(addr);
	const unsigned long nr_freed = i;

	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	else
		sub_zone_page_state(page_zone(page),
				NR_SLAB_UNRECLAIMABLE, nr_freed);
	while (i--) {
		BUG_ON(!PageSlab(page));
		__ClearPageSlab(page);
		page++;
	}
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += nr_freed;
	free_pages((unsigned long)addr, cachep->gfporder);
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
	struct kmem_cache *cachep = slab_rcu->cachep;

	kmem_freepages(cachep, slab_rcu->addr);
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slab_rcu);
}

#if DEBUG

#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
			    unsigned long caller)
{
	int size = obj_size(cachep);

	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];

	if (size < 5 * sizeof(unsigned long))
		return;

	*addr++ = 0x12345678;
	*addr++ = caller;
	*addr++ = smp_processor_id();
	size -= 3 * sizeof(unsigned long);
	{
		unsigned long *sptr = &caller;
		unsigned long svalue;

		while (!kstack_end(sptr)) {
			svalue = *sptr++;
			if (kernel_text_address(svalue)) {
				*addr++ = svalue;
				size -= sizeof(unsigned long);
				if (size <= sizeof(unsigned long))
					break;
			}
		}

	}
	*addr++ = 0x87654321;
}
#endif

static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
	int size = obj_size(cachep);
	addr = &((char *)addr)[obj_offset(cachep)];

	memset(addr, val, size);
	*(unsigned char *)(addr + size - 1) = POISON_END;
}
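
/*
 * Poisoning uses the byte patterns from <linux/poison.h>: a free object is
 * filled with POISON_FREE (0x6b) and its last byte is set to POISON_END
 * (0xa5).  check_poison_obj() below reports the first bytes that deviate
 * from that pattern.
 */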

static void dump_line(char *data, int offset, int limit)
{
	int i;
	unsigned char error = 0;
	int bad_count = 0;

	printk(KERN_ERR "%03x:", offset);
	for (i = 0; i < limit; i++) {
		if (data[offset + i] != POISON_FREE) {
			error = data[offset + i];
			bad_count++;
		}
		printk(" %02x", (unsigned char)data[offset + i]);
	}
	printk("\n");

	if (bad_count == 1) {
		error ^= POISON_FREE;
		if (!(error & (error - 1))) {
			printk(KERN_ERR "Single bit error detected. Probably "
					"bad RAM.\n");
#ifdef CONFIG_X86
			printk(KERN_ERR "Run memtest86+ or a similar memory "
					"test tool.\n");
#else
			printk(KERN_ERR "Run a memory test tool.\n");
#endif
		}
	}
}
#endif

#if DEBUG

static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
	int i, size;
	char *realobj;

	if (cachep->flags & SLAB_RED_ZONE) {
		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
			*dbg_redzone1(cachep, objp),
			*dbg_redzone2(cachep, objp));
	}

	if (cachep->flags & SLAB_STORE_USER) {
		printk(KERN_ERR "Last user: [<%p>]",
			*dbg_userword(cachep, objp));
		print_symbol("(%s)",
				(unsigned long)*dbg_userword(cachep, objp));
		printk("\n");
	}
	realobj = (char *)objp + obj_offset(cachep);
	size = obj_size(cachep);
	for (i = 0; i < size && lines; i += 16, lines--) {
		int limit;
		limit = 16;
		if (i + limit > size)
			limit = size - i;
		dump_line(realobj, i, limit);
	}
}

static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
	char *realobj;
	int size, i;
	int lines = 0;

	realobj = (char *)objp + obj_offset(cachep);
	size = obj_size(cachep);

	for (i = 0; i < size; i++) {
		char exp = POISON_FREE;
		if (i == size - 1)
			exp = POISON_END;
		if (realobj[i] != exp) {
			int limit;
			/* Mismatch ! */
			/* Print header */
			if (lines == 0) {
				printk(KERN_ERR
					"Slab corruption: %s start=%p, len=%d\n",
					cachep->name, realobj, size);
				print_objinfo(cachep, objp, 0);
			}
			/* Hexdump the affected line */
			i = (i / 16) * 16;
			limit = 16;
			if (i + limit > size)
				limit = size - i;
			dump_line(realobj, i, limit);
			i += 16;
			lines++;
			/* Limit to 5 lines */
			if (lines > 5)
				break;
		}
	}
	if (lines != 0) {
		/* Print some data about the neighboring objects, if they
		 * exist:
		 */
		struct slab *slabp = virt_to_slab(objp);
		unsigned int objnr;

		objnr = obj_to_index(cachep, slabp, objp);
		if (objnr) {
			objp = index_to_obj(cachep, slabp, objnr - 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
		if (objnr + 1 < cachep->num) {
			objp = index_to_obj(cachep, slabp, objnr + 1);
			realobj = (char *)objp + obj_offset(cachep);
			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
			       realobj, size);
			print_objinfo(cachep, objp, 2);
		}
	}
}
#endif

#if DEBUG
/**
 * slab_destroy_objs - destroy a slab and its objects
 * @cachep: cache pointer being destroyed
 * @slabp: slab pointer being destroyed
 *
 * Call the registered destructor for each object in a slab that is being
 * destroyed.
 */
static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
	int i;
	for (i = 0; i < cachep->num; i++) {
		void *objp = index_to_obj(cachep, slabp, i);

		if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			if (cachep->buffer_size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
					cachep->buffer_size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
#else
			check_poison_obj(cachep, objp);
#endif
		}
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "start of a freed object "
					   "was overwritten");
			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
				slab_error(cachep, "end of a freed object "
					   "was overwritten");
		}
	}
}
#else
static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
}
#endif

/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @slabp: slab pointer being destroyed
 *
 * Destroy all the objs in a slab, and release the mem back to the system.
 * Before calling, the slab must have been unlinked from the cache.  The
 * cache-lock is not held/needed.
 */
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
	void *addr = slabp->s_mem - slabp->colouroff;

	slab_destroy_objs(cachep, slabp);
	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
		struct slab_rcu *slab_rcu;

		slab_rcu = (struct slab_rcu *)slabp;
		slab_rcu->cachep = cachep;
		slab_rcu->addr = addr;
		call_rcu(&slab_rcu->head, kmem_rcu_free);
	} else {
		kmem_freepages(cachep, addr);
		if (OFF_SLAB(cachep))
			kmem_cache_free(cachep->slabp_cache, slabp);
	}
}

/*
 * For setting up all the kmem_list3s for a cache whose buffer_size is the
 * same as the size of kmem_list3.
 */
static void __init set_up_list3s(struct kmem_cache *cachep, int index)
{
	int node;

	for_each_online_node(node) {
		cachep->nodelists[node] = &initkmem_list3[index + node];
		cachep->nodelists[node]->next_reap = jiffies +
		    REAPTIMEOUT_LIST3 +
		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
	}
}

static void __kmem_cache_destroy(struct kmem_cache *cachep)
{
	int i;
	struct kmem_list3 *l3;

	for_each_online_cpu(i)
		kfree(cachep->array[i]);

	/* NUMA: free the list3 structures */
	for_each_online_node(i) {
		l3 = cachep->nodelists[i];
		if (l3) {
			kfree(l3->shared);
			free_alien_cache(l3->alien);
			kfree(l3);
		}
	}
	kmem_cache_free(&cache_cache, cachep);
}


/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent.  For now, try to avoid using
 * high order pages for slabs.  When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
1975 */ 1976static size_t calculate_slab_order(struct kmem_cache *cachep, 1977 size_t size, size_t align, unsigned long flags) 1978{ 1979 unsigned long offslab_limit; 1980 size_t left_over = 0; 1981 int gfporder; 1982 1983 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { 1984 unsigned int num; 1985 size_t remainder; 1986 1987 cache_estimate(gfporder, size, align, flags, &remainder, &num); 1988 if (!num) 1989 continue; 1990 1991 if (flags & CFLGS_OFF_SLAB) { 1992 /* 1993 * Max number of objs-per-slab for caches which 1994 * use off-slab slabs. Needed to avoid a possible 1995 * looping condition in cache_grow(). 1996 */ 1997 offslab_limit = size - sizeof(struct slab); 1998 offslab_limit /= sizeof(kmem_bufctl_t); 1999 2000 if (num > offslab_limit) 2001 break; 2002 } 2003 2004 /* Found something acceptable - save it away */ 2005 cachep->num = num; 2006 cachep->gfporder = gfporder; 2007 left_over = remainder; 2008 2009 /* 2010 * A VFS-reclaimable slab tends to have most allocations 2011 * as GFP_NOFS and we really don't want to have to be allocating 2012 * higher-order pages when we are unable to shrink dcache. 2013 */ 2014 if (flags & SLAB_RECLAIM_ACCOUNT) 2015 break; 2016 2017 /* 2018 * Large number of objects is good, but very large slabs are 2019 * currently bad for the gfp()s. 2020 */ 2021 if (gfporder >= slab_break_gfp_order) 2022 break; 2023 2024 /* 2025 * Acceptable internal fragmentation? 2026 */ 2027 if (left_over * 8 <= (PAGE_SIZE << gfporder)) 2028 break; 2029 } 2030 return left_over; 2031} 2032 2033static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) 2034{ 2035 if (g_cpucache_up == FULL) 2036 return enable_cpucache(cachep); 2037 2038 if (g_cpucache_up == NONE) { 2039 /* 2040 * Note: the first kmem_cache_create must create the cache 2041 * that's used by kmalloc(24), otherwise the creation of 2042 * further caches will BUG(). 2043 */ 2044 cachep->array[smp_processor_id()] = &initarray_generic.cache; 2045 2046 /* 2047 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is 2048 * the first cache, then we need to set up all its list3s, 2049 * otherwise the creation of further caches will BUG(). 2050 */ 2051 set_up_list3s(cachep, SIZE_AC); 2052 if (INDEX_AC == INDEX_L3) 2053 g_cpucache_up = PARTIAL_L3; 2054 else 2055 g_cpucache_up = PARTIAL_AC; 2056 } else { 2057 cachep->array[smp_processor_id()] = 2058 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); 2059 2060 if (g_cpucache_up == PARTIAL_AC) { 2061 set_up_list3s(cachep, SIZE_L3); 2062 g_cpucache_up = PARTIAL_L3; 2063 } else { 2064 int node; 2065 for_each_online_node(node) { 2066 cachep->nodelists[node] = 2067 kmalloc_node(sizeof(struct kmem_list3), 2068 GFP_KERNEL, node); 2069 BUG_ON(!cachep->nodelists[node]); 2070 kmem_list3_init(cachep->nodelists[node]); 2071 } 2072 } 2073 } 2074 cachep->nodelists[numa_node_id()]->next_reap = 2075 jiffies + REAPTIMEOUT_LIST3 + 2076 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2077 2078 cpu_cache_get(cachep)->avail = 0; 2079 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; 2080 cpu_cache_get(cachep)->batchcount = 1; 2081 cpu_cache_get(cachep)->touched = 0; 2082 cachep->batchcount = 1; 2083 cachep->limit = BOOT_CPUCACHE_ENTRIES; 2084 return 0; 2085} 2086 2087/** 2088 * kmem_cache_create - Create a cache. 2089 * @name: A string which is used in /proc/slabinfo to identify this cache. 2090 * @size: The size of objects to be created in this cache. 2091 * @align: The required alignment for the objects. 
2092 * @flags: SLAB flags 2093 * @ctor: A constructor for the objects. 2094 * @dtor: A destructor for the objects (not implemented anymore). 2095 * 2096 * Returns a ptr to the cache on success, NULL on failure. 2097 * Cannot be called within a int, but can be interrupted. 2098 * The @ctor is run when new pages are allocated by the cache 2099 * and the @dtor is run before the pages are handed back. 2100 * 2101 * @name must be valid until the cache is destroyed. This implies that 2102 * the module calling this has to destroy the cache before getting unloaded. 2103 * 2104 * The flags are 2105 * 2106 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) 2107 * to catch references to uninitialised memory. 2108 * 2109 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check 2110 * for buffer overruns. 2111 * 2112 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware 2113 * cacheline. This can be beneficial if you're counting cycles as closely 2114 * as davem. 2115 */ 2116struct kmem_cache * 2117kmem_cache_create (const char *name, size_t size, size_t align, 2118 unsigned long flags, 2119 void (*ctor)(void*, struct kmem_cache *, unsigned long), 2120 void (*dtor)(void*, struct kmem_cache *, unsigned long)) 2121{ 2122 size_t left_over, slab_size, ralign; 2123 struct kmem_cache *cachep = NULL, *pc; 2124 2125 /* 2126 * Sanity checks... these are all serious usage bugs. 2127 */ 2128 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2129 size > KMALLOC_MAX_SIZE || dtor) { 2130 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2131 name); 2132 BUG(); 2133 } 2134 2135 /* 2136 * We use cache_chain_mutex to ensure a consistent view of 2137 * cpu_online_map as well. Please see cpuup_callback 2138 */ 2139 mutex_lock(&cache_chain_mutex); 2140 2141 list_for_each_entry(pc, &cache_chain, next) { 2142 char tmp; 2143 int res; 2144 2145 /* 2146 * This happens when the module gets unloaded and doesn't 2147 * destroy its slab cache and no-one else reuses the vmalloc 2148 * area of the module. Print a warning. 2149 */ 2150 res = probe_kernel_address(pc->name, tmp); 2151 if (res) { 2152 printk(KERN_ERR 2153 "SLAB: cache with size %d has lost its name\n", 2154 pc->buffer_size); 2155 continue; 2156 } 2157 2158 if (!strcmp(pc->name, name)) { 2159 printk(KERN_ERR 2160 "kmem_cache_create: duplicate cache %s\n", name); 2161 dump_stack(); 2162 goto oops; 2163 } 2164 } 2165 2166#if DEBUG 2167 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 2168#if FORCED_DEBUG 2169 /* 2170 * Enable redzoning and last user accounting, except for caches with 2171 * large objects, if the increased size would increase the object size 2172 * above the next power of two: caches with object sizes just above a 2173 * power of two have a significant amount of internal fragmentation. 2174 */ 2175 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 2176 2 * sizeof(unsigned long long))) 2177 flags |= SLAB_RED_ZONE | SLAB_STORE_USER; 2178 if (!(flags & SLAB_DESTROY_BY_RCU)) 2179 flags |= SLAB_POISON; 2180#endif 2181 if (flags & SLAB_DESTROY_BY_RCU) 2182 BUG_ON(flags & SLAB_POISON); 2183#endif 2184 /* 2185 * Always checks flags, a caller might be expecting debug support which 2186 * isn't available. 2187 */ 2188 BUG_ON(flags & ~CREATE_MASK); 2189 2190 /* 2191 * Check that size is in terms of words. This is needed to avoid 2192 * unaligned accesses for some archs when redzoning is used, and makes 2193 * sure any on-slab bufctl's are also correctly aligned. 
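 *
 * For example, on a 64-bit kernel (BYTES_PER_WORD == 8) a requested object
 * size of 13 bytes is rounded up to 16 by the two statements below.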
2194 */ 2195 if (size & (BYTES_PER_WORD - 1)) { 2196 size += (BYTES_PER_WORD - 1); 2197 size &= ~(BYTES_PER_WORD - 1); 2198 } 2199 2200 /* calculate the final buffer alignment: */ 2201 2202 /* 1) arch recommendation: can be overridden for debug */ 2203 if (flags & SLAB_HWCACHE_ALIGN) { 2204 /* 2205 * Default alignment: as specified by the arch code. Except if 2206 * an object is really small, then squeeze multiple objects into 2207 * one cacheline. 2208 */ 2209 ralign = cache_line_size(); 2210 while (size <= ralign / 2) 2211 ralign /= 2; 2212 } else { 2213 ralign = BYTES_PER_WORD; 2214 } 2215 2216 /* 2217 * Redzoning and user store require word alignment or possibly larger. 2218 * Note this will be overridden by architecture or caller mandated 2219 * alignment if either is greater than BYTES_PER_WORD. 2220 */ 2221 if (flags & SLAB_STORE_USER) 2222 ralign = BYTES_PER_WORD; 2223 2224 if (flags & SLAB_RED_ZONE) { 2225 ralign = REDZONE_ALIGN; 2226 /* If redzoning, ensure that the second redzone is suitably 2227 * aligned, by adjusting the object size accordingly. */ 2228 size += REDZONE_ALIGN - 1; 2229 size &= ~(REDZONE_ALIGN - 1); 2230 } 2231 2232 /* 2) arch mandated alignment */ 2233 if (ralign < ARCH_SLAB_MINALIGN) { 2234 ralign = ARCH_SLAB_MINALIGN; 2235 } 2236 /* 3) caller mandated alignment */ 2237 if (ralign < align) { 2238 ralign = align; 2239 } 2240 /* disable debug if necessary */ 2241 if (ralign > __alignof__(unsigned long long)) 2242 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2243 /* 2244 * 4) Store it. 2245 */ 2246 align = ralign; 2247 2248 /* Get cache's description obj. */ 2249 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); 2250 if (!cachep) 2251 goto oops; 2252 2253#if DEBUG 2254 cachep->obj_size = size; 2255 2256 /* 2257 * Both debugging options require word-alignment which is calculated 2258 * into align above. 2259 */ 2260 if (flags & SLAB_RED_ZONE) { 2261 /* add space for red zone words */ 2262 cachep->obj_offset += sizeof(unsigned long long); 2263 size += 2 * sizeof(unsigned long long); 2264 } 2265 if (flags & SLAB_STORE_USER) { 2266 /* user store requires one word storage behind the end of 2267 * the real object. But if the second red zone needs to be 2268 * aligned to 64 bits, we must allow that much space. 2269 */ 2270 if (flags & SLAB_RED_ZONE) 2271 size += REDZONE_ALIGN; 2272 else 2273 size += BYTES_PER_WORD; 2274 } 2275#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) 2276 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size 2277 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { 2278 cachep->obj_offset += PAGE_SIZE - size; 2279 size = PAGE_SIZE; 2280 } 2281#endif 2282#endif 2283 2284 /* 2285 * Determine if the slab management is 'on' or 'off' slab. 2286 * (bootstrapping cannot cope with offslab caches so don't do 2287 * it too early on.) 2288 */ 2289 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init) 2290 /* 2291 * Size is large, assume best to place the slab management obj 2292 * off-slab (should allow better packing of objs). 
2293 */ 2294 flags |= CFLGS_OFF_SLAB; 2295 2296 size = ALIGN(size, align); 2297 2298 left_over = calculate_slab_order(cachep, size, align, flags); 2299 2300 if (!cachep->num) { 2301 printk(KERN_ERR 2302 "kmem_cache_create: couldn't create cache %s.\n", name); 2303 kmem_cache_free(&cache_cache, cachep); 2304 cachep = NULL; 2305 goto oops; 2306 } 2307 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) 2308 + sizeof(struct slab), align); 2309 2310 /* 2311 * If the slab has been placed off-slab, and we have enough space then 2312 * move it on-slab. This is at the expense of any extra colouring. 2313 */ 2314 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { 2315 flags &= ~CFLGS_OFF_SLAB; 2316 left_over -= slab_size; 2317 } 2318 2319 if (flags & CFLGS_OFF_SLAB) { 2320 /* really off slab. No need for manual alignment */ 2321 slab_size = 2322 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); 2323 } 2324 2325 cachep->colour_off = cache_line_size(); 2326 /* Offset must be a multiple of the alignment. */ 2327 if (cachep->colour_off < align) 2328 cachep->colour_off = align; 2329 cachep->colour = left_over / cachep->colour_off; 2330 cachep->slab_size = slab_size; 2331 cachep->flags = flags; 2332 cachep->gfpflags = 0; 2333 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) 2334 cachep->gfpflags |= GFP_DMA; 2335 cachep->buffer_size = size; 2336 cachep->reciprocal_buffer_size = reciprocal_value(size); 2337 2338 if (flags & CFLGS_OFF_SLAB) { 2339 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); 2340 /* 2341 * This is a possibility for one of the malloc_sizes caches. 2342 * But since we go off slab only for object size greater than 2343 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order, 2344 * this should not happen at all. 2345 * But leave a BUG_ON for some lucky dude. 
2346 */ 2347 BUG_ON(!cachep->slabp_cache); 2348 } 2349 cachep->ctor = ctor; 2350 cachep->name = name; 2351 2352 if (setup_cpu_cache(cachep)) { 2353 __kmem_cache_destroy(cachep); 2354 cachep = NULL; 2355 goto oops; 2356 } 2357 2358 /* cache setup completed, link it into the list */ 2359 list_add(&cachep->next, &cache_chain); 2360oops: 2361 if (!cachep && (flags & SLAB_PANIC)) 2362 panic("kmem_cache_create(): failed to create slab `%s'\n", 2363 name); 2364 mutex_unlock(&cache_chain_mutex); 2365 return cachep; 2366} 2367EXPORT_SYMBOL(kmem_cache_create); 2368 2369#if DEBUG 2370static void check_irq_off(void) 2371{ 2372 BUG_ON(!irqs_disabled()); 2373} 2374 2375static void check_irq_on(void) 2376{ 2377 BUG_ON(irqs_disabled()); 2378} 2379 2380static void check_spinlock_acquired(struct kmem_cache *cachep) 2381{ 2382#ifdef CONFIG_SMP 2383 check_irq_off(); 2384 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2385#endif 2386} 2387 2388static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) 2389{ 2390#ifdef CONFIG_SMP 2391 check_irq_off(); 2392 assert_spin_locked(&cachep->nodelists[node]->list_lock); 2393#endif 2394} 2395 2396#else 2397#define check_irq_off() do { } while(0) 2398#define check_irq_on() do { } while(0) 2399#define check_spinlock_acquired(x) do { } while(0) 2400#define check_spinlock_acquired_node(x, y) do { } while(0) 2401#endif 2402 2403static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, 2404 struct array_cache *ac, 2405 int force, int node); 2406 2407static void do_drain(void *arg) 2408{ 2409 struct kmem_cache *cachep = arg; 2410 struct array_cache *ac; 2411 int node = numa_node_id(); 2412 2413 check_irq_off(); 2414 ac = cpu_cache_get(cachep); 2415 spin_lock(&cachep->nodelists[node]->list_lock); 2416 free_block(cachep, ac->entry, ac->avail, node); 2417 spin_unlock(&cachep->nodelists[node]->list_lock); 2418 ac->avail = 0; 2419} 2420 2421static void drain_cpu_caches(struct kmem_cache *cachep) 2422{ 2423 struct kmem_list3 *l3; 2424 int node; 2425 2426 on_each_cpu(do_drain, cachep, 1, 1); 2427 check_irq_on(); 2428 for_each_online_node(node) { 2429 l3 = cachep->nodelists[node]; 2430 if (l3 && l3->alien) 2431 drain_alien_cache(cachep, l3->alien); 2432 } 2433 2434 for_each_online_node(node) { 2435 l3 = cachep->nodelists[node]; 2436 if (l3) 2437 drain_array(cachep, l3, l3->shared, 1, node); 2438 } 2439} 2440 2441/* 2442 * Remove slabs from the list of free slabs. 2443 * Specify the number of slabs to drain in tofree. 2444 * 2445 * Returns the actual number of slabs released. 2446 */ 2447static int drain_freelist(struct kmem_cache *cache, 2448 struct kmem_list3 *l3, int tofree) 2449{ 2450 struct list_head *p; 2451 int nr_freed; 2452 struct slab *slabp; 2453 2454 nr_freed = 0; 2455 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) { 2456 2457 spin_lock_irq(&l3->list_lock); 2458 p = l3->slabs_free.prev; 2459 if (p == &l3->slabs_free) { 2460 spin_unlock_irq(&l3->list_lock); 2461 goto out; 2462 } 2463 2464 slabp = list_entry(p, struct slab, list); 2465#if DEBUG 2466 BUG_ON(slabp->inuse); 2467#endif 2468 list_del(&slabp->list); 2469 /* 2470 * Safe to drop the lock. The slab is no longer linked 2471 * to the cache. 
2472 */ 2473 l3->free_objects -= cache->num; 2474 spin_unlock_irq(&l3->list_lock); 2475 slab_destroy(cache, slabp); 2476 nr_freed++; 2477 } 2478out: 2479 return nr_freed; 2480} 2481 2482/* Called with cache_chain_mutex held to protect against cpu hotplug */ 2483static int __cache_shrink(struct kmem_cache *cachep) 2484{ 2485 int ret = 0, i = 0; 2486 struct kmem_list3 *l3; 2487 2488 drain_cpu_caches(cachep); 2489 2490 check_irq_on(); 2491 for_each_online_node(i) { 2492 l3 = cachep->nodelists[i]; 2493 if (!l3) 2494 continue; 2495 2496 drain_freelist(cachep, l3, l3->free_objects); 2497 2498 ret += !list_empty(&l3->slabs_full) || 2499 !list_empty(&l3->slabs_partial); 2500 } 2501 return (ret ? 1 : 0); 2502} 2503 2504/** 2505 * kmem_cache_shrink - Shrink a cache. 2506 * @cachep: The cache to shrink. 2507 * 2508 * Releases as many slabs as possible for a cache. 2509 * To help debugging, a zero exit status indicates all slabs were released. 2510 */ 2511int kmem_cache_shrink(struct kmem_cache *cachep) 2512{ 2513 int ret; 2514 BUG_ON(!cachep || in_interrupt()); 2515 2516 mutex_lock(&cache_chain_mutex); 2517 ret = __cache_shrink(cachep); 2518 mutex_unlock(&cache_chain_mutex); 2519 return ret; 2520} 2521EXPORT_SYMBOL(kmem_cache_shrink); 2522 2523/** 2524 * kmem_cache_destroy - delete a cache 2525 * @cachep: the cache to destroy 2526 * 2527 * Remove a &struct kmem_cache object from the slab cache. 2528 * 2529 * It is expected this function will be called by a module when it is 2530 * unloaded. This will remove the cache completely, and avoid a duplicate 2531 * cache being allocated each time a module is loaded and unloaded, if the 2532 * module doesn't have persistent in-kernel storage across loads and unloads. 2533 * 2534 * The cache must be empty before calling this function. 2535 * 2536 * The caller must guarantee that noone will allocate memory from the cache 2537 * during the kmem_cache_destroy(). 2538 */ 2539void kmem_cache_destroy(struct kmem_cache *cachep) 2540{ 2541 BUG_ON(!cachep || in_interrupt()); 2542 2543 /* Find the cache in the chain of caches. */ 2544 mutex_lock(&cache_chain_mutex); 2545 /* 2546 * the chain is never empty, cache_cache is never destroyed 2547 */ 2548 list_del(&cachep->next); 2549 if (__cache_shrink(cachep)) { 2550 slab_error(cachep, "Can't free all objects"); 2551 list_add(&cachep->next, &cache_chain); 2552 mutex_unlock(&cache_chain_mutex); 2553 return; 2554 } 2555 2556 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) 2557 synchronize_rcu(); 2558 2559 __kmem_cache_destroy(cachep); 2560 mutex_unlock(&cache_chain_mutex); 2561} 2562EXPORT_SYMBOL(kmem_cache_destroy); 2563 2564/* 2565 * Get the memory for a slab management obj. 2566 * For a slab cache when the slab descriptor is off-slab, slab descriptors 2567 * always come from malloc_sizes caches. The slab descriptor cannot 2568 * come from the same cache which is getting created because, 2569 * when we are searching for an appropriate cache for these 2570 * descriptors in kmem_cache_create, we search through the malloc_sizes array. 2571 * If we are creating a malloc_sizes cache here it would not be visible to 2572 * kmem_find_general_cachep till the initialization is complete. 2573 * Hence we cannot have slabp_cache same as the original cache. 2574 */ 2575static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, 2576 int colour_off, gfp_t local_flags, 2577 int nodeid) 2578{ 2579 struct slab *slabp; 2580 2581 if (OFF_SLAB(cachep)) { 2582 /* Slab management obj is off-slab. 
*/ 2583 slabp = kmem_cache_alloc_node(cachep->slabp_cache, 2584 local_flags & ~GFP_THISNODE, nodeid); 2585 if (!slabp) 2586 return NULL; 2587 } else { 2588 slabp = objp + colour_off; 2589 colour_off += cachep->slab_size; 2590 } 2591 slabp->inuse = 0; 2592 slabp->colouroff = colour_off; 2593 slabp->s_mem = objp + colour_off; 2594 slabp->nodeid = nodeid; 2595 return slabp; 2596} 2597 2598static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp) 2599{ 2600 return (kmem_bufctl_t *) (slabp + 1); 2601} 2602 2603static void cache_init_objs(struct kmem_cache *cachep, 2604 struct slab *slabp) 2605{ 2606 int i; 2607 2608 for (i = 0; i < cachep->num; i++) { 2609 void *objp = index_to_obj(cachep, slabp, i); 2610#if DEBUG 2611 /* need to poison the objs? */ 2612 if (cachep->flags & SLAB_POISON) 2613 poison_obj(cachep, objp, POISON_FREE); 2614 if (cachep->flags & SLAB_STORE_USER) 2615 *dbg_userword(cachep, objp) = NULL; 2616 2617 if (cachep->flags & SLAB_RED_ZONE) { 2618 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2619 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2620 } 2621 /* 2622 * Constructors are not allowed to allocate memory from the same 2623 * cache which they are a constructor for. Otherwise, deadlock. 2624 * They must also be threaded. 2625 */ 2626 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) 2627 cachep->ctor(objp + obj_offset(cachep), cachep, 2628 0); 2629 2630 if (cachep->flags & SLAB_RED_ZONE) { 2631 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) 2632 slab_error(cachep, "constructor overwrote the" 2633 " end of an object"); 2634 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) 2635 slab_error(cachep, "constructor overwrote the" 2636 " start of an object"); 2637 } 2638 if ((cachep->buffer_size % PAGE_SIZE) == 0 && 2639 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) 2640 kernel_map_pages(virt_to_page(objp), 2641 cachep->buffer_size / PAGE_SIZE, 0); 2642#else 2643 if (cachep->ctor) 2644 cachep->ctor(objp, cachep, 0); 2645#endif 2646 slab_bufctl(slabp)[i] = i + 1; 2647 } 2648 slab_bufctl(slabp)[i - 1] = BUFCTL_END; 2649 slabp->free = 0; 2650} 2651 2652static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) 2653{ 2654 if (CONFIG_ZONE_DMA_FLAG) { 2655 if (flags & GFP_DMA) 2656 BUG_ON(!(cachep->gfpflags & GFP_DMA)); 2657 else 2658 BUG_ON(cachep->gfpflags & GFP_DMA); 2659 } 2660} 2661 2662static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, 2663 int nodeid) 2664{ 2665 void *objp = index_to_obj(cachep, slabp, slabp->free); 2666 kmem_bufctl_t next; 2667 2668 slabp->inuse++; 2669 next = slab_bufctl(slabp)[slabp->free]; 2670#if DEBUG 2671 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; 2672 WARN_ON(slabp->nodeid != nodeid); 2673#endif 2674 slabp->free = next; 2675 2676 return objp; 2677} 2678 2679static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, 2680 void *objp, int nodeid) 2681{ 2682 unsigned int objnr = obj_to_index(cachep, slabp, objp); 2683 2684#if DEBUG 2685 /* Verify that the slab belongs to the intended node */ 2686 WARN_ON(slabp->nodeid != nodeid); 2687 2688 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) { 2689 printk(KERN_ERR "slab: double free detected in cache " 2690 "'%s', objp %p\n", cachep->name, objp); 2691 BUG(); 2692 } 2693#endif 2694 slab_bufctl(slabp)[objnr] = slabp->free; 2695 slabp->free = objnr; 2696 slabp->inuse--; 2697} 2698 2699/* 2700 * Map pages beginning at addr to the given cache and slab. 
This is required 2701 * for the slab allocator to be able to lookup the cache and slab of a 2702 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. 2703 */ 2704static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, 2705 void *addr) 2706{ 2707 int nr_pages; 2708 struct page *page; 2709 2710 page = virt_to_page(addr); 2711 2712 nr_pages = 1; 2713 if (likely(!PageCompound(page))) 2714 nr_pages <<= cache->gfporder; 2715 2716 do { 2717 page_set_cache(page, cache); 2718 page_set_slab(page, slab); 2719 page++; 2720 } while (--nr_pages); 2721} 2722 2723/* 2724 * Grow (by 1) the number of slabs within a cache. This is called by 2725 * kmem_cache_alloc() when there are no active objs left in a cache. 2726 */ 2727static int cache_grow(struct kmem_cache *cachep, 2728 gfp_t flags, int nodeid, void *objp) 2729{ 2730 struct slab *slabp; 2731 size_t offset; 2732 gfp_t local_flags; 2733 struct kmem_list3 *l3; 2734 2735 /* 2736 * Be lazy and only check for valid flags here, keeping it out of the 2737 * critical path in kmem_cache_alloc(). 2738 */ 2739 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); 2740 2741 local_flags = (flags & GFP_LEVEL_MASK); 2742 /* Take the l3 list lock to change the colour_next on this node */ 2743 check_irq_off(); 2744 l3 = cachep->nodelists[nodeid]; 2745 spin_lock(&l3->list_lock); 2746 2747 /* Get colour for the slab, and cal the next value. */ 2748 offset = l3->colour_next; 2749 l3->colour_next++; 2750 if (l3->colour_next >= cachep->colour) 2751 l3->colour_next = 0; 2752 spin_unlock(&l3->list_lock); 2753 2754 offset *= cachep->colour_off; 2755 2756 if (local_flags & __GFP_WAIT) 2757 local_irq_enable(); 2758 2759 /* 2760 * The test for missing atomic flag is performed here, rather than 2761 * the more obvious place, simply to reduce the critical path length 2762 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they 2763 * will eventually be caught here (where it matters). 2764 */ 2765 kmem_flagcheck(cachep, flags); 2766 2767 /* 2768 * Get mem for the objs. Attempt to allocate a physical page from 2769 * 'nodeid'. 2770 */ 2771 if (!objp) 2772 objp = kmem_getpages(cachep, flags, nodeid); 2773 if (!objp) 2774 goto failed; 2775 2776 /* Get slab management. */ 2777 slabp = alloc_slabmgmt(cachep, objp, offset, 2778 local_flags & ~GFP_THISNODE, nodeid); 2779 if (!slabp) 2780 goto opps1; 2781 2782 slabp->nodeid = nodeid; 2783 slab_map_pages(cachep, slabp, objp); 2784 2785 cache_init_objs(cachep, slabp); 2786 2787 if (local_flags & __GFP_WAIT) 2788 local_irq_disable(); 2789 check_irq_off(); 2790 spin_lock(&l3->list_lock); 2791 2792 /* Make slab active. */ 2793 list_add_tail(&slabp->list, &(l3->slabs_free)); 2794 STATS_INC_GROWN(cachep); 2795 l3->free_objects += cachep->num; 2796 spin_unlock(&l3->list_lock); 2797 return 1; 2798opps1: 2799 kmem_freepages(cachep, objp); 2800failed: 2801 if (local_flags & __GFP_WAIT) 2802 local_irq_disable(); 2803 return 0; 2804} 2805 2806#if DEBUG 2807 2808/* 2809 * Perform extra freeing checks: 2810 * - detect bad pointers. 
2811 * - POISON/RED_ZONE checking 2812 */ 2813static void kfree_debugcheck(const void *objp) 2814{ 2815 if (!virt_addr_valid(objp)) { 2816 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", 2817 (unsigned long)objp); 2818 BUG(); 2819 } 2820} 2821 2822static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) 2823{ 2824 unsigned long long redzone1, redzone2; 2825 2826 redzone1 = *dbg_redzone1(cache, obj); 2827 redzone2 = *dbg_redzone2(cache, obj); 2828 2829 /* 2830 * Redzone is ok. 2831 */ 2832 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) 2833 return; 2834 2835 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) 2836 slab_error(cache, "double free detected"); 2837 else 2838 slab_error(cache, "memory outside object was overwritten"); 2839 2840 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", 2841 obj, redzone1, redzone2); 2842} 2843 2844static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, 2845 void *caller) 2846{ 2847 struct page *page; 2848 unsigned int objnr; 2849 struct slab *slabp; 2850 2851 objp -= obj_offset(cachep); 2852 kfree_debugcheck(objp); 2853 page = virt_to_head_page(objp); 2854 2855 slabp = page_get_slab(page); 2856 2857 if (cachep->flags & SLAB_RED_ZONE) { 2858 verify_redzone_free(cachep, objp); 2859 *dbg_redzone1(cachep, objp) = RED_INACTIVE; 2860 *dbg_redzone2(cachep, objp) = RED_INACTIVE; 2861 } 2862 if (cachep->flags & SLAB_STORE_USER) 2863 *dbg_userword(cachep, objp) = caller; 2864 2865 objnr = obj_to_index(cachep, slabp, objp); 2866 2867 BUG_ON(objnr >= cachep->num); 2868 BUG_ON(objp != index_to_obj(cachep, slabp, objnr)); 2869 2870#ifdef CONFIG_DEBUG_SLAB_LEAK 2871 slab_bufctl(slabp)[objnr] = BUFCTL_FREE; 2872#endif 2873 if (cachep->flags & SLAB_POISON) { 2874#ifdef CONFIG_DEBUG_PAGEALLOC 2875 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2876 store_stackinfo(cachep, objp, (unsigned long)caller); 2877 kernel_map_pages(virt_to_page(objp), 2878 cachep->buffer_size / PAGE_SIZE, 0); 2879 } else { 2880 poison_obj(cachep, objp, POISON_FREE); 2881 } 2882#else 2883 poison_obj(cachep, objp, POISON_FREE); 2884#endif 2885 } 2886 return objp; 2887} 2888 2889static void check_slabp(struct kmem_cache *cachep, struct slab *slabp) 2890{ 2891 kmem_bufctl_t i; 2892 int entries = 0; 2893 2894 /* Check slab's freelist to see if this obj is there. */ 2895 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { 2896 entries++; 2897 if (entries > cachep->num || i >= cachep->num) 2898 goto bad; 2899 } 2900 if (entries != cachep->num - slabp->inuse) { 2901bad: 2902 printk(KERN_ERR "slab: Internal list corruption detected in " 2903 "cache '%s'(%d), slabp %p(%d). 
Hexdump:\n", 2904 cachep->name, cachep->num, slabp, slabp->inuse); 2905 for (i = 0; 2906 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t); 2907 i++) { 2908 if (i % 16 == 0) 2909 printk("\n%03x:", i); 2910 printk(" %02x", ((unsigned char *)slabp)[i]); 2911 } 2912 printk("\n"); 2913 BUG(); 2914 } 2915} 2916#else 2917#define kfree_debugcheck(x) do { } while(0) 2918#define cache_free_debugcheck(x,objp,z) (objp) 2919#define check_slabp(x,y) do { } while(0) 2920#endif 2921 2922static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) 2923{ 2924 int batchcount; 2925 struct kmem_list3 *l3; 2926 struct array_cache *ac; 2927 int node; 2928 2929 node = numa_node_id(); 2930 2931 check_irq_off(); 2932 ac = cpu_cache_get(cachep); 2933retry: 2934 batchcount = ac->batchcount; 2935 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 2936 /* 2937 * If there was little recent activity on this cache, then 2938 * perform only a partial refill. Otherwise we could generate 2939 * refill bouncing. 2940 */ 2941 batchcount = BATCHREFILL_LIMIT; 2942 } 2943 l3 = cachep->nodelists[node]; 2944 2945 BUG_ON(ac->avail > 0 || !l3); 2946 spin_lock(&l3->list_lock); 2947 2948 /* See if we can refill from the shared array */ 2949 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) 2950 goto alloc_done; 2951 2952 while (batchcount > 0) { 2953 struct list_head *entry; 2954 struct slab *slabp; 2955 /* Get slab alloc is to come from. */ 2956 entry = l3->slabs_partial.next; 2957 if (entry == &l3->slabs_partial) { 2958 l3->free_touched = 1; 2959 entry = l3->slabs_free.next; 2960 if (entry == &l3->slabs_free) 2961 goto must_grow; 2962 } 2963 2964 slabp = list_entry(entry, struct slab, list); 2965 check_slabp(cachep, slabp); 2966 check_spinlock_acquired(cachep); 2967 2968 /* 2969 * The slab was either on partial or free list so 2970 * there must be at least one object available for 2971 * allocation. 2972 */ 2973 BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num); 2974 2975 while (slabp->inuse < cachep->num && batchcount--) { 2976 STATS_INC_ALLOCED(cachep); 2977 STATS_INC_ACTIVE(cachep); 2978 STATS_SET_HIGH(cachep); 2979 2980 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp, 2981 node); 2982 } 2983 check_slabp(cachep, slabp); 2984 2985 /* move slabp to correct slabp list: */ 2986 list_del(&slabp->list); 2987 if (slabp->free == BUFCTL_END) 2988 list_add(&slabp->list, &l3->slabs_full); 2989 else 2990 list_add(&slabp->list, &l3->slabs_partial); 2991 } 2992 2993must_grow: 2994 l3->free_objects -= ac->avail; 2995alloc_done: 2996 spin_unlock(&l3->list_lock); 2997 2998 if (unlikely(!ac->avail)) { 2999 int x; 3000 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); 3001 3002 /* cache_grow can reenable interrupts, then ac could change. */ 3003 ac = cpu_cache_get(cachep); 3004 if (!x && ac->avail == 0) /* no objects in sight? abort */ 3005 return NULL; 3006 3007 if (!ac->avail) /* objects refilled by interrupt? 
*/ 3008 goto retry; 3009 } 3010 ac->touched = 1; 3011 return ac->entry[--ac->avail]; 3012} 3013 3014static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, 3015 gfp_t flags) 3016{ 3017 might_sleep_if(flags & __GFP_WAIT); 3018#if DEBUG 3019 kmem_flagcheck(cachep, flags); 3020#endif 3021} 3022 3023#if DEBUG 3024static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3025 gfp_t flags, void *objp, void *caller) 3026{ 3027 if (!objp) 3028 return objp; 3029 if (cachep->flags & SLAB_POISON) { 3030#ifdef CONFIG_DEBUG_PAGEALLOC 3031 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) 3032 kernel_map_pages(virt_to_page(objp), 3033 cachep->buffer_size / PAGE_SIZE, 1); 3034 else 3035 check_poison_obj(cachep, objp); 3036#else 3037 check_poison_obj(cachep, objp); 3038#endif 3039 poison_obj(cachep, objp, POISON_INUSE); 3040 } 3041 if (cachep->flags & SLAB_STORE_USER) 3042 *dbg_userword(cachep, objp) = caller; 3043 3044 if (cachep->flags & SLAB_RED_ZONE) { 3045 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3046 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3047 slab_error(cachep, "double free, or memory outside" 3048 " object was overwritten"); 3049 printk(KERN_ERR 3050 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 3051 objp, *dbg_redzone1(cachep, objp), 3052 *dbg_redzone2(cachep, objp)); 3053 } 3054 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3055 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3056 } 3057#ifdef CONFIG_DEBUG_SLAB_LEAK 3058 { 3059 struct slab *slabp; 3060 unsigned objnr; 3061 3062 slabp = page_get_slab(virt_to_head_page(objp)); 3063 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size; 3064 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE; 3065 } 3066#endif 3067 objp += obj_offset(cachep); 3068 if (cachep->ctor && cachep->flags & SLAB_POISON) 3069 cachep->ctor(objp, cachep, 0); 3070#if ARCH_SLAB_MINALIGN 3071 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { 3072 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3073 objp, ARCH_SLAB_MINALIGN); 3074 } 3075#endif 3076 return objp; 3077} 3078#else 3079#define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 3080#endif 3081 3082#ifdef CONFIG_FAILSLAB 3083 3084static struct failslab_attr { 3085 3086 struct fault_attr attr; 3087 3088 u32 ignore_gfp_wait; 3089#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3090 struct dentry *ignore_gfp_wait_file; 3091#endif 3092 3093} failslab = { 3094 .attr = FAULT_ATTR_INITIALIZER, 3095 .ignore_gfp_wait = 1, 3096}; 3097 3098static int __init setup_failslab(char *str) 3099{ 3100 return setup_fault_attr(&failslab.attr, str); 3101} 3102__setup("failslab=", setup_failslab); 3103 3104static int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3105{ 3106 if (cachep == &cache_cache) 3107 return 0; 3108 if (flags & __GFP_NOFAIL) 3109 return 0; 3110 if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT)) 3111 return 0; 3112 3113 return should_fail(&failslab.attr, obj_size(cachep)); 3114} 3115 3116#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3117 3118static int __init failslab_debugfs(void) 3119{ 3120 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 3121 struct dentry *dir; 3122 int err; 3123 3124 err = init_fault_attr_dentries(&failslab.attr, "failslab"); 3125 if (err) 3126 return err; 3127 dir = failslab.attr.dentries.dir; 3128 3129 failslab.ignore_gfp_wait_file = 3130 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3131 &failslab.ignore_gfp_wait); 3132 3133 if (!failslab.ignore_gfp_wait_file) { 3134 err = -ENOMEM; 3135 debugfs_remove(failslab.ignore_gfp_wait_file); 3136 
cleanup_fault_attr_dentries(&failslab.attr); 3137 } 3138 3139 return err; 3140} 3141 3142late_initcall(failslab_debugfs); 3143 3144#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3145 3146#else /* CONFIG_FAILSLAB */ 3147 3148static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags) 3149{ 3150 return 0; 3151} 3152 3153#endif /* CONFIG_FAILSLAB */ 3154 3155static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3156{ 3157 void *objp; 3158 struct array_cache *ac; 3159 3160 check_irq_off(); 3161 3162 ac = cpu_cache_get(cachep); 3163 if (likely(ac->avail)) { 3164 STATS_INC_ALLOCHIT(cachep); 3165 ac->touched = 1; 3166 objp = ac->entry[--ac->avail]; 3167 } else { 3168 STATS_INC_ALLOCMISS(cachep); 3169 objp = cache_alloc_refill(cachep, flags); 3170 } 3171 return objp; 3172} 3173 3174#ifdef CONFIG_NUMA 3175/* 3176 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY. 3177 * 3178 * If we are in_interrupt, then process context, including cpusets and 3179 * mempolicy, may not apply and should not be used for allocation policy. 3180 */ 3181static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) 3182{ 3183 int nid_alloc, nid_here; 3184 3185 if (in_interrupt() || (flags & __GFP_THISNODE)) 3186 return NULL; 3187 nid_alloc = nid_here = numa_node_id(); 3188 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3189 nid_alloc = cpuset_mem_spread_node(); 3190 else if (current->mempolicy) 3191 nid_alloc = slab_node(current->mempolicy); 3192 if (nid_alloc != nid_here) 3193 return ____cache_alloc_node(cachep, flags, nid_alloc); 3194 return NULL; 3195} 3196 3197/* 3198 * Fallback function if there was no memory available and no objects on a 3199 * certain node and fall back is permitted. First we scan all the 3200 * available nodelists for available objects. If that fails then we 3201 * perform an allocation without specifying a node. This allows the page 3202 * allocator to do its reclaim / fallback magic. We then insert the 3203 * slab into the proper nodelist and then allocate from it. 3204 */ 3205static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) 3206{ 3207 struct zonelist *zonelist; 3208 gfp_t local_flags; 3209 struct zone **z; 3210 void *obj = NULL; 3211 int nid; 3212 3213 if (flags & __GFP_THISNODE) 3214 return NULL; 3215 3216 zonelist = &NODE_DATA(slab_node(current->mempolicy)) 3217 ->node_zonelists[gfp_zone(flags)]; 3218 local_flags = (flags & GFP_LEVEL_MASK); 3219 3220retry: 3221 /* 3222 * Look through allowed nodes for objects available 3223 * from existing per node queues. 3224 */ 3225 for (z = zonelist->zones; *z && !obj; z++) { 3226 nid = zone_to_nid(*z); 3227 3228 if (cpuset_zone_allowed_hardwall(*z, flags) && 3229 cache->nodelists[nid] && 3230 cache->nodelists[nid]->free_objects) 3231 obj = ____cache_alloc_node(cache, 3232 flags | GFP_THISNODE, nid); 3233 } 3234 3235 if (!obj) { 3236 /* 3237 * This allocation will be performed within the constraints 3238 * of the current cpuset / memory policy requirements. 3239 * We may trigger various forms of reclaim on the allowed 3240 * set and go into memory reserves if necessary. 
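	 *
	 * Note that the page is requested below with nodeid -1, i.e. from
	 * whichever node the page allocator picks, and cache_grow() then
	 * threads the new slab onto that node's lists before the node-local
	 * allocation is retried.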
3241 */ 3242 if (local_flags & __GFP_WAIT) 3243 local_irq_enable(); 3244 kmem_flagcheck(cache, flags); 3245 obj = kmem_getpages(cache, flags, -1); 3246 if (local_flags & __GFP_WAIT) 3247 local_irq_disable(); 3248 if (obj) { 3249 /* 3250 * Insert into the appropriate per node queues 3251 */ 3252 nid = page_to_nid(virt_to_page(obj)); 3253 if (cache_grow(cache, flags, nid, obj)) { 3254 obj = ____cache_alloc_node(cache, 3255 flags | GFP_THISNODE, nid); 3256 if (!obj) 3257 /* 3258 * Another processor may allocate the 3259 * objects in the slab since we are 3260 * not holding any locks. 3261 */ 3262 goto retry; 3263 } else { 3264 /* cache_grow already freed obj */ 3265 obj = NULL; 3266 } 3267 } 3268 } 3269 return obj; 3270} 3271 3272/* 3273 * A interface to enable slab creation on nodeid 3274 */ 3275static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, 3276 int nodeid) 3277{ 3278 struct list_head *entry; 3279 struct slab *slabp; 3280 struct kmem_list3 *l3; 3281 void *obj; 3282 int x; 3283 3284 l3 = cachep->nodelists[nodeid]; 3285 BUG_ON(!l3); 3286 3287retry: 3288 check_irq_off(); 3289 spin_lock(&l3->list_lock); 3290 entry = l3->slabs_partial.next; 3291 if (entry == &l3->slabs_partial) { 3292 l3->free_touched = 1; 3293 entry = l3->slabs_free.next; 3294 if (entry == &l3->slabs_free) 3295 goto must_grow; 3296 } 3297 3298 slabp = list_entry(entry, struct slab, list); 3299 check_spinlock_acquired_node(cachep, nodeid); 3300 check_slabp(cachep, slabp); 3301 3302 STATS_INC_NODEALLOCS(cachep); 3303 STATS_INC_ACTIVE(cachep); 3304 STATS_SET_HIGH(cachep); 3305 3306 BUG_ON(slabp->inuse == cachep->num); 3307 3308 obj = slab_get_obj(cachep, slabp, nodeid); 3309 check_slabp(cachep, slabp); 3310 l3->free_objects--; 3311 /* move slabp to correct slabp list: */ 3312 list_del(&slabp->list); 3313 3314 if (slabp->free == BUFCTL_END) 3315 list_add(&slabp->list, &l3->slabs_full); 3316 else 3317 list_add(&slabp->list, &l3->slabs_partial); 3318 3319 spin_unlock(&l3->list_lock); 3320 goto done; 3321 3322must_grow: 3323 spin_unlock(&l3->list_lock); 3324 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); 3325 if (x) 3326 goto retry; 3327 3328 return fallback_alloc(cachep, flags); 3329 3330done: 3331 return obj; 3332} 3333 3334/** 3335 * kmem_cache_alloc_node - Allocate an object on the specified node 3336 * @cachep: The cache to allocate from. 3337 * @flags: See kmalloc(). 3338 * @nodeid: node number of the target node. 3339 * @caller: return address of caller, used for debug information 3340 * 3341 * Identical to kmem_cache_alloc but it will allocate memory on the given 3342 * node, which can improve the performance for cpu bound structures. 3343 * 3344 * Fallback to other node is possible if __GFP_THISNODE is not set. 3345 */ 3346static __always_inline void * 3347__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, 3348 void *caller) 3349{ 3350 unsigned long save_flags; 3351 void *ptr; 3352 3353 if (should_failslab(cachep, flags)) 3354 return NULL; 3355 3356 cache_alloc_debugcheck_before(cachep, flags); 3357 local_irq_save(save_flags); 3358 3359 if (unlikely(nodeid == -1)) 3360 nodeid = numa_node_id(); 3361 3362 if (unlikely(!cachep->nodelists[nodeid])) { 3363 /* Node not bootstrapped yet */ 3364 ptr = fallback_alloc(cachep, flags); 3365 goto out; 3366 } 3367 3368 if (nodeid == numa_node_id()) { 3369 /* 3370 * Use the locally cached objects if possible. 3371 * However ____cache_alloc does not allow fallback 3372 * to other nodes. 
It may fail while we still have 3373 * objects on other nodes available. 3374 */ 3375 ptr = ____cache_alloc(cachep, flags); 3376 if (ptr) 3377 goto out; 3378 } 3379 /* ___cache_alloc_node can fall back to other nodes */ 3380 ptr = ____cache_alloc_node(cachep, flags, nodeid); 3381 out: 3382 local_irq_restore(save_flags); 3383 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3384 3385 return ptr; 3386} 3387 3388static __always_inline void * 3389__do_cache_alloc(struct kmem_cache *cache, gfp_t flags) 3390{ 3391 void *objp; 3392 3393 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) { 3394 objp = alternate_node_alloc(cache, flags); 3395 if (objp) 3396 goto out; 3397 } 3398 objp = ____cache_alloc(cache, flags); 3399 3400 /* 3401 * We may just have run out of memory on the local node. 3402 * ____cache_alloc_node() knows how to locate memory on other nodes 3403 */ 3404 if (!objp) 3405 objp = ____cache_alloc_node(cache, flags, numa_node_id()); 3406 3407 out: 3408 return objp; 3409} 3410#else 3411 3412static __always_inline void * 3413__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3414{ 3415 return ____cache_alloc(cachep, flags); 3416} 3417 3418#endif /* CONFIG_NUMA */ 3419 3420static __always_inline void * 3421__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) 3422{ 3423 unsigned long save_flags; 3424 void *objp; 3425 3426 if (should_failslab(cachep, flags)) 3427 return NULL; 3428 3429 cache_alloc_debugcheck_before(cachep, flags); 3430 local_irq_save(save_flags); 3431 objp = __do_cache_alloc(cachep, flags); 3432 local_irq_restore(save_flags); 3433 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3434 prefetchw(objp); 3435 3436 return objp; 3437} 3438 3439/* 3440 * Caller needs to acquire correct kmem_list's list_lock 3441 */ 3442static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, 3443 int node) 3444{ 3445 int i; 3446 struct kmem_list3 *l3; 3447 3448 for (i = 0; i < nr_objects; i++) { 3449 void *objp = objpp[i]; 3450 struct slab *slabp; 3451 3452 slabp = virt_to_slab(objp); 3453 l3 = cachep->nodelists[node]; 3454 list_del(&slabp->list); 3455 check_spinlock_acquired_node(cachep, node); 3456 check_slabp(cachep, slabp); 3457 slab_put_obj(cachep, slabp, objp, node); 3458 STATS_DEC_ACTIVE(cachep); 3459 l3->free_objects++; 3460 check_slabp(cachep, slabp); 3461 3462 /* fixup slab chains */ 3463 if (slabp->inuse == 0) { 3464 if (l3->free_objects > l3->free_limit) { 3465 l3->free_objects -= cachep->num; 3466 /* No need to drop any previously held 3467 * lock here, even if we have a off-slab slab 3468 * descriptor it is guaranteed to come from 3469 * a different cache, refer to comments before 3470 * alloc_slabmgmt. 3471 */ 3472 slab_destroy(cachep, slabp); 3473 } else { 3474 list_add(&slabp->list, &l3->slabs_free); 3475 } 3476 } else { 3477 /* Unconditionally move a slab to the end of the 3478 * partial list on free - maximum time for the 3479 * other objects to be freed, too. 
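			 *
			 * (Allocations take slabs from the head of the partial
			 * list, so a slab parked at the tail is the least
			 * likely to receive new objects and has the best
			 * chance of eventually becoming completely free.)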
3480 */ 3481 list_add_tail(&slabp->list, &l3->slabs_partial); 3482 } 3483 } 3484} 3485 3486static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) 3487{ 3488 int batchcount; 3489 struct kmem_list3 *l3; 3490 int node = numa_node_id(); 3491 3492 batchcount = ac->batchcount; 3493#if DEBUG 3494 BUG_ON(!batchcount || batchcount > ac->avail); 3495#endif 3496 check_irq_off(); 3497 l3 = cachep->nodelists[node]; 3498 spin_lock(&l3->list_lock); 3499 if (l3->shared) { 3500 struct array_cache *shared_array = l3->shared; 3501 int max = shared_array->limit - shared_array->avail; 3502 if (max) { 3503 if (batchcount > max) 3504 batchcount = max; 3505 memcpy(&(shared_array->entry[shared_array->avail]), 3506 ac->entry, sizeof(void *) * batchcount); 3507 shared_array->avail += batchcount; 3508 goto free_done; 3509 } 3510 } 3511 3512 free_block(cachep, ac->entry, batchcount, node); 3513free_done: 3514#if STATS 3515 { 3516 int i = 0; 3517 struct list_head *p; 3518 3519 p = l3->slabs_free.next; 3520 while (p != &(l3->slabs_free)) { 3521 struct slab *slabp; 3522 3523 slabp = list_entry(p, struct slab, list); 3524 BUG_ON(slabp->inuse); 3525 3526 i++; 3527 p = p->next; 3528 } 3529 STATS_SET_FREEABLE(cachep, i); 3530 } 3531#endif 3532 spin_unlock(&l3->list_lock); 3533 ac->avail -= batchcount; 3534 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); 3535} 3536 3537/* 3538 * Release an obj back to its cache. If the obj has a constructed state, it must 3539 * be in this state _before_ it is released. Called with disabled ints. 3540 */ 3541static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3542{ 3543 struct array_cache *ac = cpu_cache_get(cachep); 3544 3545 check_irq_off(); 3546 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3547 3548 if (cache_free_alien(cachep, objp)) 3549 return; 3550 3551 if (likely(ac->avail < ac->limit)) { 3552 STATS_INC_FREEHIT(cachep); 3553 ac->entry[ac->avail++] = objp; 3554 return; 3555 } else { 3556 STATS_INC_FREEMISS(cachep); 3557 cache_flusharray(cachep, ac); 3558 ac->entry[ac->avail++] = objp; 3559 } 3560} 3561 3562/** 3563 * kmem_cache_alloc - Allocate an object 3564 * @cachep: The cache to allocate from. 3565 * @flags: See kmalloc(). 3566 * 3567 * Allocate an object from this cache. The flags are only relevant 3568 * if the cache has no available objects. 3569 */ 3570void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 3571{ 3572 return __cache_alloc(cachep, flags, __builtin_return_address(0)); 3573} 3574EXPORT_SYMBOL(kmem_cache_alloc); 3575 3576/** 3577 * kmem_cache_zalloc - Allocate an object. The memory is set to zero. 3578 * @cache: The cache to allocate from. 3579 * @flags: See kmalloc(). 3580 * 3581 * Allocate an object from this cache and set the allocated memory to zero. 3582 * The flags are only relevant if the cache has no available objects. 3583 */ 3584void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags) 3585{ 3586 void *ret = __cache_alloc(cache, flags, __builtin_return_address(0)); 3587 if (ret) 3588 memset(ret, 0, obj_size(cache)); 3589 return ret; 3590} 3591EXPORT_SYMBOL(kmem_cache_zalloc); 3592 3593/** 3594 * kmem_ptr_validate - check if an untrusted pointer might 3595 * be a slab entry. 
3596 * @cachep: the cache we're checking against 3597 * @ptr: pointer to validate 3598 * 3599 * This verifies that the untrusted pointer looks sane: 3600 * it is _not_ a guarantee that the pointer is actually 3601 * part of the slab cache in question, but it at least 3602 * validates that the pointer can be dereferenced and 3603 * looks half-way sane. 3604 * 3605 * Currently only used for dentry validation. 3606 */ 3607int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) 3608{ 3609 unsigned long addr = (unsigned long)ptr; 3610 unsigned long min_addr = PAGE_OFFSET; 3611 unsigned long align_mask = BYTES_PER_WORD - 1; 3612 unsigned long size = cachep->buffer_size; 3613 struct page *page; 3614 3615 if (unlikely(addr < min_addr)) 3616 goto out; 3617 if (unlikely(addr > (unsigned long)high_memory - size)) 3618 goto out; 3619 if (unlikely(addr & align_mask)) 3620 goto out; 3621 if (unlikely(!kern_addr_valid(addr))) 3622 goto out; 3623 if (unlikely(!kern_addr_valid(addr + size - 1))) 3624 goto out; 3625 page = virt_to_page(ptr); 3626 if (unlikely(!PageSlab(page))) 3627 goto out; 3628 if (unlikely(page_get_cache(page) != cachep)) 3629 goto out; 3630 return 1; 3631out: 3632 return 0; 3633} 3634 3635#ifdef CONFIG_NUMA 3636void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) 3637{ 3638 return __cache_alloc_node(cachep, flags, nodeid, 3639 __builtin_return_address(0)); 3640} 3641EXPORT_SYMBOL(kmem_cache_alloc_node); 3642 3643static __always_inline void * 3644__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller) 3645{ 3646 struct kmem_cache *cachep; 3647 3648 cachep = kmem_find_general_cachep(size, flags); 3649 if (unlikely(cachep == NULL)) 3650 return NULL; 3651 return kmem_cache_alloc_node(cachep, flags, node); 3652} 3653 3654#ifdef CONFIG_DEBUG_SLAB 3655void *__kmalloc_node(size_t size, gfp_t flags, int node) 3656{ 3657 return __do_kmalloc_node(size, flags, node, 3658 __builtin_return_address(0)); 3659} 3660EXPORT_SYMBOL(__kmalloc_node); 3661 3662void *__kmalloc_node_track_caller(size_t size, gfp_t flags, 3663 int node, void *caller) 3664{ 3665 return __do_kmalloc_node(size, flags, node, caller); 3666} 3667EXPORT_SYMBOL(__kmalloc_node_track_caller); 3668#else 3669void *__kmalloc_node(size_t size, gfp_t flags, int node) 3670{ 3671 return __do_kmalloc_node(size, flags, node, NULL); 3672} 3673EXPORT_SYMBOL(__kmalloc_node); 3674#endif /* CONFIG_DEBUG_SLAB */ 3675#endif /* CONFIG_NUMA */ 3676 3677/** 3678 * __do_kmalloc - allocate memory 3679 * @size: how many bytes of memory are required. 3680 * @flags: the type of memory to allocate (see kmalloc). 3681 * @caller: function caller for debug tracking of the caller 3682 */ 3683static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, 3684 void *caller) 3685{ 3686 struct kmem_cache *cachep; 3687 3688 /* If you want to save a few bytes .text space: replace 3689 * __ with kmem_. 3690 * Then kmalloc uses the uninlined functions instead of the inline 3691 * functions. 
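	 *
	 * __find_general_cachep() picks the first malloc_sizes[] entry whose
	 * cs_size is at least @size (the DMA variant when __GFP_DMA is set),
	 * so e.g. a kmalloc(100, ...) is served from the 128-byte general
	 * cache.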
3692 */ 3693 cachep = __find_general_cachep(size, flags); 3694 if (unlikely(cachep == NULL)) 3695 return NULL; 3696 return __cache_alloc(cachep, flags, caller); 3697} 3698 3699 3700#ifdef CONFIG_DEBUG_SLAB 3701void *__kmalloc(size_t size, gfp_t flags) 3702{ 3703 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3704} 3705EXPORT_SYMBOL(__kmalloc); 3706 3707void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) 3708{ 3709 return __do_kmalloc(size, flags, caller); 3710} 3711EXPORT_SYMBOL(__kmalloc_track_caller); 3712 3713#else 3714void *__kmalloc(size_t size, gfp_t flags) 3715{ 3716 return __do_kmalloc(size, flags, NULL); 3717} 3718EXPORT_SYMBOL(__kmalloc); 3719#endif 3720 3721/** 3722 * krealloc - reallocate memory. The contents will remain unchanged. 3723 * @p: object to reallocate memory for. 3724 * @new_size: how many bytes of memory are required. 3725 * @flags: the type of memory to allocate. 3726 * 3727 * The contents of the object pointed to are preserved up to the 3728 * lesser of the new and old sizes. If @p is %NULL, krealloc() 3729 * behaves exactly like kmalloc(). If @size is 0 and @p is not a 3730 * %NULL pointer, the object pointed to is freed. 3731 */ 3732void *krealloc(const void *p, size_t new_size, gfp_t flags) 3733{ 3734 struct kmem_cache *cache, *new_cache; 3735 void *ret; 3736 3737 if (unlikely(!p)) 3738 return kmalloc_track_caller(new_size, flags); 3739 3740 if (unlikely(!new_size)) { 3741 kfree(p); 3742 return NULL; 3743 } 3744 3745 cache = virt_to_cache(p); 3746 new_cache = __find_general_cachep(new_size, flags); 3747 3748 /* 3749 * If new size fits in the current cache, bail out. 3750 */ 3751 if (likely(cache == new_cache)) 3752 return (void *)p; 3753 3754 /* 3755 * We are on the slow-path here so do not use __cache_alloc 3756 * because it bloats kernel text. 3757 */ 3758 ret = kmalloc_track_caller(new_size, flags); 3759 if (ret) { 3760 memcpy(ret, p, min(new_size, ksize(p))); 3761 kfree(p); 3762 } 3763 return ret; 3764} 3765EXPORT_SYMBOL(krealloc); 3766 3767/** 3768 * kmem_cache_free - Deallocate an object 3769 * @cachep: The cache the allocation was from. 3770 * @objp: The previously allocated object. 3771 * 3772 * Free an object which was previously allocated from this 3773 * cache. 3774 */ 3775void kmem_cache_free(struct kmem_cache *cachep, void *objp) 3776{ 3777 unsigned long flags; 3778 3779 BUG_ON(virt_to_cache(objp) != cachep); 3780 3781 local_irq_save(flags); 3782 debug_check_no_locks_freed(objp, obj_size(cachep)); 3783 __cache_free(cachep, objp); 3784 local_irq_restore(flags); 3785} 3786EXPORT_SYMBOL(kmem_cache_free); 3787 3788/** 3789 * kfree - free previously allocated memory 3790 * @objp: pointer returned by kmalloc. 3791 * 3792 * If @objp is NULL, no operation is performed. 3793 * 3794 * Don't free memory not originally allocated by kmalloc() 3795 * or you will run into trouble. 
3796 */ 3797void kfree(const void *objp) 3798{ 3799 struct kmem_cache *c; 3800 unsigned long flags; 3801 3802 if (unlikely(!objp)) 3803 return; 3804 local_irq_save(flags); 3805 kfree_debugcheck(objp); 3806 c = virt_to_cache(objp); 3807 debug_check_no_locks_freed(objp, obj_size(c)); 3808 __cache_free(c, (void *)objp); 3809 local_irq_restore(flags); 3810} 3811EXPORT_SYMBOL(kfree); 3812 3813unsigned int kmem_cache_size(struct kmem_cache *cachep) 3814{ 3815 return obj_size(cachep); 3816} 3817EXPORT_SYMBOL(kmem_cache_size); 3818 3819const char *kmem_cache_name(struct kmem_cache *cachep) 3820{ 3821 return cachep->name; 3822} 3823EXPORT_SYMBOL_GPL(kmem_cache_name); 3824 3825/* 3826 * This initializes kmem_list3 or resizes varioius caches for all nodes. 3827 */ 3828static int alloc_kmemlist(struct kmem_cache *cachep) 3829{ 3830 int node; 3831 struct kmem_list3 *l3; 3832 struct array_cache *new_shared; 3833 struct array_cache **new_alien = NULL; 3834 3835 for_each_online_node(node) { 3836 3837 if (use_alien_caches) { 3838 new_alien = alloc_alien_cache(node, cachep->limit); 3839 if (!new_alien) 3840 goto fail; 3841 } 3842 3843 new_shared = NULL; 3844 if (cachep->shared) { 3845 new_shared = alloc_arraycache(node, 3846 cachep->shared*cachep->batchcount, 3847 0xbaadf00d); 3848 if (!new_shared) { 3849 free_alien_cache(new_alien); 3850 goto fail; 3851 } 3852 } 3853 3854 l3 = cachep->nodelists[node]; 3855 if (l3) { 3856 struct array_cache *shared = l3->shared; 3857 3858 spin_lock_irq(&l3->list_lock); 3859 3860 if (shared) 3861 free_block(cachep, shared->entry, 3862 shared->avail, node); 3863 3864 l3->shared = new_shared; 3865 if (!l3->alien) { 3866 l3->alien = new_alien; 3867 new_alien = NULL; 3868 } 3869 l3->free_limit = (1 + nr_cpus_node(node)) * 3870 cachep->batchcount + cachep->num; 3871 spin_unlock_irq(&l3->list_lock); 3872 kfree(shared); 3873 free_alien_cache(new_alien); 3874 continue; 3875 } 3876 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node); 3877 if (!l3) { 3878 free_alien_cache(new_alien); 3879 kfree(new_shared); 3880 goto fail; 3881 } 3882 3883 kmem_list3_init(l3); 3884 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + 3885 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 3886 l3->shared = new_shared; 3887 l3->alien = new_alien; 3888 l3->free_limit = (1 + nr_cpus_node(node)) * 3889 cachep->batchcount + cachep->num; 3890 cachep->nodelists[node] = l3; 3891 } 3892 return 0; 3893 3894fail: 3895 if (!cachep->next.next) { 3896 /* Cache is not active yet. 
Roll back what we did */ 3897 node--; 3898 while (node >= 0) { 3899 if (cachep->nodelists[node]) { 3900 l3 = cachep->nodelists[node]; 3901 3902 kfree(l3->shared); 3903 free_alien_cache(l3->alien); 3904 kfree(l3); 3905 cachep->nodelists[node] = NULL; 3906 } 3907 node--; 3908 } 3909 } 3910 return -ENOMEM; 3911} 3912 3913struct ccupdate_struct { 3914 struct kmem_cache *cachep; 3915 struct array_cache *new[NR_CPUS]; 3916}; 3917 3918static void do_ccupdate_local(void *info) 3919{ 3920 struct ccupdate_struct *new = info; 3921 struct array_cache *old; 3922 3923 check_irq_off(); 3924 old = cpu_cache_get(new->cachep); 3925 3926 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; 3927 new->new[smp_processor_id()] = old; 3928} 3929 3930/* Always called with the cache_chain_mutex held */ 3931static int do_tune_cpucache(struct kmem_cache *cachep, int limit, 3932 int batchcount, int shared) 3933{ 3934 struct ccupdate_struct *new; 3935 int i; 3936 3937 new = kzalloc(sizeof(*new), GFP_KERNEL); 3938 if (!new) 3939 return -ENOMEM; 3940 3941 for_each_online_cpu(i) { 3942 new->new[i] = alloc_arraycache(cpu_to_node(i), limit, 3943 batchcount); 3944 if (!new->new[i]) { 3945 for (i--; i >= 0; i--) 3946 kfree(new->new[i]); 3947 kfree(new); 3948 return -ENOMEM; 3949 } 3950 } 3951 new->cachep = cachep; 3952 3953 on_each_cpu(do_ccupdate_local, (void *)new, 1, 1); 3954 3955 check_irq_on(); 3956 cachep->batchcount = batchcount; 3957 cachep->limit = limit; 3958 cachep->shared = shared; 3959 3960 for_each_online_cpu(i) { 3961 struct array_cache *ccold = new->new[i]; 3962 if (!ccold) 3963 continue; 3964 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3965 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 3966 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 3967 kfree(ccold); 3968 } 3969 kfree(new); 3970 return alloc_kmemlist(cachep); 3971} 3972 3973/* Called with cache_chain_mutex held always */ 3974static int enable_cpucache(struct kmem_cache *cachep) 3975{ 3976 int err; 3977 int limit, shared; 3978 3979 /* 3980 * The head array serves three purposes: 3981 * - create a LIFO ordering, i.e. return objects that are cache-warm 3982 * - reduce the number of spinlock operations. 3983 * - reduce the number of linked list operations on the slab and 3984 * bufctl chains: array operations are cheaper. 3985 * The numbers are guessed, we should auto-tune as described by 3986 * Bonwick. 3987 */ 3988 if (cachep->buffer_size > 131072) 3989 limit = 1; 3990 else if (cachep->buffer_size > PAGE_SIZE) 3991 limit = 8; 3992 else if (cachep->buffer_size > 1024) 3993 limit = 24; 3994 else if (cachep->buffer_size > 256) 3995 limit = 54; 3996 else 3997 limit = 120; 3998 3999 /* 4000 * CPU bound tasks (e.g. network routing) can exhibit cpu bound 4001 * allocation behaviour: Most allocs on one cpu, most free operations 4002 * on another cpu. For these cases, an efficient object passing between 4003 * cpus is necessary. This is provided by a shared array. The array 4004 * replaces Bonwick's magazine layer. 4005 * On uniprocessor, it's functionally equivalent (but less efficient) 4006 * to a larger limit. Thus disabled by default. 4007 */ 4008 shared = 0; 4009 if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1) 4010 shared = 8; 4011 4012#if DEBUG 4013 /* 4014 * With debugging enabled, large batchcount lead to excessively long 4015 * periods with disabled local interrupts. 
#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with local interrupts disabled. Limit the batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
		       cachep->name, -err);
	return err;
}

/*
 * Drain an array if it contains any elements, taking the l3 lock only if
 * necessary. Note that the l3 listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			 struct array_cache *ac, int force, int node)
{
	int tofree;

	if (!ac || !ac->avail)
		return;
	if (ac->touched && !force) {
		ac->touched = 0;
	} else {
		spin_lock_irq(&l3->list_lock);
		if (ac->avail) {
			tofree = force ? ac->avail : (ac->limit + 4) / 5;
			if (tofree > ac->avail)
				tofree = (ac->avail + 1) / 2;
			free_block(cachep, ac->entry, tofree, node);
			ac->avail -= tofree;
			memmove(ac->entry, &(ac->entry[tofree]),
				sizeof(void *) * ac->avail);
		}
		spin_unlock_irq(&l3->list_lock);
	}
}

/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
static void cache_reap(struct work_struct *w)
{
	struct kmem_cache *searchp;
	struct kmem_list3 *l3;
	int node = numa_node_id();
	struct delayed_work *work =
		container_of(w, struct delayed_work, work);

	if (!mutex_trylock(&cache_chain_mutex))
		/* Give up. Set up the next iteration. */
		goto out;

	list_for_each_entry(searchp, &cache_chain, next) {
		check_irq_on();

		/*
		 * We only take the l3 lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
		l3 = searchp->nodelists[node];

		reap_alien(searchp, l3);

		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);

		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
		if (time_after(l3->next_reap, jiffies))
			goto next;

		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;

		drain_array(searchp, l3, l3->shared, 0, node);

		if (l3->free_touched)
			l3->free_touched = 0;
		else {
			int freed;

			freed = drain_freelist(searchp, l3, (l3->free_limit +
				5 * searchp->num - 1) / (5 * searchp->num));
			STATS_ADD_REAPED(searchp, freed);
		}
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&cache_chain_mutex);
	next_reap_node();
out:
	/* Set up the next iteration */
	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}

#ifdef CONFIG_PROC_FS

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
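	/*
	 * Illustrative result of the seq_puts() calls below with STATS
	 * disabled (the second header is a single long line, wrapped here
	 * for readability):
	 *
	 *   slabinfo - version: 2.1
	 *   # name <active_objs> <num_objs> <objsize> <objperslab>
	 *     <pagesperslab> : tunables <limit> <batchcount> <sharedfactor>
	 *     : slabdata <active_slabs> <num_slabs> <sharedavail>
	 */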
#if STATS
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct list_head *p;

	mutex_lock(&cache_chain_mutex);
	if (!n)
		print_slabinfo_header(m);
	p = cache_chain.next;
	while (n--) {
		p = p->next;
		if (p == &cache_chain)
			return NULL;
	}
	return list_entry(p, struct kmem_cache, next);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct kmem_cache *cachep = p;
	++*pos;
	return cachep->next.next == &cache_chain ?
		NULL : list_entry(cachep->next.next, struct kmem_cache, next);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&cache_chain_mutex);
}

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = p;
	struct slab *slabp;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_list3 *l3;

	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		check_irq_on();
		spin_lock_irq(&l3->list_lock);

		list_for_each_entry(slabp, &l3->slabs_full, list) {
			if (slabp->inuse != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each_entry(slabp, &l3->slabs_partial, list) {
			if (slabp->inuse == cachep->num && !error)
				error = "slabs_partial inuse accounting error";
			if (!slabp->inuse && !error)
				error = "slabs_partial/inuse accounting error";
			active_objs += slabp->inuse;
			active_slabs++;
		}
		list_for_each_entry(slabp, &l3->slabs_free, list) {
			if (slabp->inuse && !error)
				error = "slabs_free/inuse accounting error";
			num_slabs++;
		}
		free_objects += l3->free_objects;
		if (l3->shared)
			shared_avail += l3->shared->avail;

		spin_unlock_irq(&l3->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   name, active_objs, num_objs, cachep->buffer_size,
		   cachep->num, (1 << cachep->gfporder));
	seq_printf(m, " : tunables %4u %4u %4u",
		   cachep->limit, cachep->batchcount, cachep->shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   active_slabs, num_slabs, shared_avail);
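	/*
	 * Illustrative line produced by the three seq_printf() calls above
	 * for a hypothetical "dentry" cache (numbers invented but internally
	 * consistent: 672 slabs * 30 objects per slab = 20160 objects;
	 * wrapped here for readability):
	 *
	 *   dentry  20055  20160    132   30    1 : tunables  120   60    8
	 *     : slabdata    672    672      0
	 */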
#if STATS
	{			/* list3 stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
				%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
				reaped, errors, max_freeable, node_allocs,
				node_frees, overflows);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-objs-per-slab
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */

const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user * buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct kmem_cache *cachep;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

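	/*
	 * Illustrative usage (cache name and values are hypothetical):
	 *
	 *   echo "dentry 120 60 8" > /proc/slabinfo
	 *
	 * writes the string parsed above: the first token is the cache name,
	 * followed by the new limit, batchcount and shared factor.
	 */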
	/* Find the cache in the chain of caches. */
	mutex_lock(&cache_chain_mutex);
	res = -EINVAL;
	list_for_each_entry(cachep, &cache_chain, next) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared);
			}
			break;
		}
	}
	mutex_unlock(&cache_chain_mutex);
	if (res >= 0)
		res = count;
	return res;
}

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void *leaks_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct list_head *p;

	mutex_lock(&cache_chain_mutex);
	p = cache_chain.next;
	while (n--) {
		p = p->next;
		if (p == &cache_chain)
			return NULL;
	}
	return list_entry(p, struct kmem_cache, next);
}

static inline int add_caller(unsigned long *n, unsigned long v)
{
	unsigned long *p;
	int l;
	if (!v)
		return 1;
	l = n[1];
	p = n + 2;
	while (l) {
		int i = l/2;
		unsigned long *q = p + 2 * i;
		if (*q == v) {
			q[1]++;
			return 1;
		}
		if (*q > v) {
			l = i;
		} else {
			p = q + 2;
			l -= i + 1;
		}
	}
	if (++n[1] == n[0])
		return 0;
	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
	p[0] = v;
	p[1] = 1;
	return 1;
}

static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
{
	void *p;
	int i;
	if (n[0] == n[1])
		return;
	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
			continue;
		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
			return;
	}
}

static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset, size;
	char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];

	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
		if (modname[0])
			seq_printf(m, " [%s]", modname);
		return;
	}
#endif
	seq_printf(m, "%p", (void *)address);
}

static int leaks_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = p;
	struct slab *slabp;
	struct kmem_list3 *l3;
	const char *name;
	unsigned long *n = m->private;
	int node;
	int i;

	if (!(cachep->flags & SLAB_STORE_USER))
		return 0;
	if (!(cachep->flags & SLAB_RED_ZONE))
		return 0;

	/* OK, we can do it */

	n[1] = 0;

	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		check_irq_on();
		spin_lock_irq(&l3->list_lock);

		list_for_each_entry(slabp, &l3->slabs_full, list)
			handle_slab(n, cachep, slabp);
		list_for_each_entry(slabp, &l3->slabs_partial, list)
			handle_slab(n, cachep, slabp);
		spin_unlock_irq(&l3->list_lock);
	}
	name = cachep->name;
	if (n[0] == n[1]) {
		/* Increase the buffer size */
		mutex_unlock(&cache_chain_mutex);
		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
		if (!m->private) {
			/* Too bad, we are really out */
			m->private = n;
			mutex_lock(&cache_chain_mutex);
			return -ENOMEM;
		}
		*(unsigned long *)m->private = n[0] * 2;
		kfree(n);
		mutex_lock(&cache_chain_mutex);
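		/*
		 * Descriptive note (added): m->private is an array of
		 * unsigned longs - slot 0 holds the capacity in
		 * (caller, count) pairs, slot 1 the number of pairs in use,
		 * followed by the sorted pairs themselves (see add_caller()
		 * above). The replacement buffer therefore records twice the
		 * old capacity in its first slot.
		 */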
		/* Now make sure this entry will be retried */
		m->count = m->size;
		return 0;
	}
	for (i = 0; i < n[1]; i++) {
		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
		show_symbol(m, n[2*i+2]);
		seq_putc(m, '\n');
	}

	return 0;
}

const struct seq_operations slabstats_op = {
	.start = leaks_start,
	.next = s_next,
	.stop = s_stop,
	.show = leaks_show,
};
#endif
#endif

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
size_t ksize(const void *objp)
{
	if (unlikely(objp == NULL))
		return 0;

	return obj_size(virt_to_cache(objp));
}
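/*
 * Illustrative use of ksize() (a sketch, not part of the original file):
 *
 *	buf = kmalloc(17, GFP_KERNEL);
 *	if (buf)
 *		usable = ksize(buf);	/- full size of the backing object,
 *					   e.g. 32 if the request was served
 *					   from a 32-byte general cache; it is
 *					   never less than the requested 17 -/
 *
 * The exact value depends on the kmalloc cache geometry of the running
 * kernel, so callers must query it at runtime rather than assume a size.
 */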