1/* 2 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright --- 9 unchanged lines hidden (view full) --- 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * |
26 * $FreeBSD: head/sys/vm/uma_core.c 94161 2002-04-08 04:48:58Z jeff $ |
27 * 28 */ 29 30/* 31 * uma_core.c Implementation of the Universal Memory allocator 32 * 33 * This allocator is intended to replace the multitude of similar object caches 34 * in the standard FreeBSD kernel. The intent is to be flexible as well as --- 117 unchanged lines hidden (view full) --- 152static void *obj_alloc(uma_zone_t, int, u_int8_t *, int); 153static void *page_alloc(uma_zone_t, int, u_int8_t *, int); 154static void page_free(void *, int, u_int8_t); 155static uma_slab_t slab_zalloc(uma_zone_t, int); 156static void cache_drain(uma_zone_t); 157static void bucket_drain(uma_zone_t, uma_bucket_t); 158static void zone_drain(uma_zone_t); 159static void zone_ctor(void *, int, void *); |
160static void zone_dtor(void *, int, void *); |
161static void zero_init(void *, int); 162static void zone_small_init(uma_zone_t zone); 163static void zone_large_init(uma_zone_t zone); 164static void zone_foreach(void (*zfunc)(uma_zone_t)); 165static void zone_timeout(uma_zone_t zone); 166static void hash_expand(struct uma_hash *); |
167static void hash_free(struct uma_hash *hash); |
168static void uma_timeout(void *); 169static void uma_startup3(void); 170static void *uma_zalloc_internal(uma_zone_t, void *, int, uma_bucket_t); 171static void uma_zfree_internal(uma_zone_t, 172 void *, void *, int); 173void uma_print_zone(uma_zone_t); 174void uma_print_stats(void); 175static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS); --- 123 unchanged lines hidden (view full) --- 299 * Discussion: 300 */ 301static void 302hash_expand(struct uma_hash *hash) 303{ 304 struct slabhead *newhash; 305 struct slabhead *oldhash; 306 uma_slab_t slab; |
307 int oldsize; 308 int newsize; |
309 int alloc; 310 int hval; 311 int i; 312 313 314 /* 315 * Remember the old hash size and see if it has to go back to the 316 * hash zone, or malloc. The hash zone is used for the initial hash 317 */ 318 |
319 oldsize = hash->uh_hashsize; |
320 oldhash = hash->uh_slab_hash; 321 |
322 /* We're just going to go to a power of two greater */ 323 if (hash->uh_hashsize) { |
324 newsize = oldsize * 2; 325 alloc = sizeof(hash->uh_slab_hash[0]) * newsize; |
326 /* XXX Shouldn't be abusing DEVBUF here */ 327 newhash = (struct slabhead *)malloc(alloc, M_DEVBUF, M_NOWAIT); 328 if (newhash == NULL) { 329 return; 330 } |
331 } else { 332 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; 333 newhash = uma_zalloc_internal(hashzone, NULL, M_WAITOK, NULL); |
334 newsize = UMA_HASH_SIZE_INIT; |
335 } 336 337 bzero(newhash, alloc); 338 |
339 hash->uh_hashmask = newsize - 1; |
340 341 /* 342 * I need to investigate hash algorithms for resizing without a 343 * full rehash. 344 */ 345 |
346 for (i = 0; i < oldsize; i++) |
347 while (!SLIST_EMPTY(&hash->uh_slab_hash[i])) { 348 slab = SLIST_FIRST(&hash->uh_slab_hash[i]); 349 SLIST_REMOVE_HEAD(&hash->uh_slab_hash[i], us_hlink); 350 hval = UMA_HASH(hash, slab->us_data); 351 SLIST_INSERT_HEAD(&newhash[hval], slab, us_hlink); 352 } 353 |
354 if (oldhash) 355 hash_free(hash); 356 |
357 hash->uh_slab_hash = newhash; |
358 hash->uh_hashsize = newsize; |
359 360 return; 361} 362 |
363static void 364hash_free(struct uma_hash *hash) 365{ 366 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) 367 uma_zfree_internal(hashzone, 368 hash->uh_slab_hash, NULL, 0); 369 else 370 free(hash->uh_slab_hash, M_DEVBUF); 371 372 hash->uh_slab_hash = NULL; 373} 374 |
375/* 376 * Frees all outstanding items in a bucket 377 * 378 * Arguments: 379 * zone The zone to free to, must be unlocked. 380 * bucket The free/alloc bucket with items, cpu queue must be locked. 381 * 382 * Returns: --- 110 unchanged lines hidden (view full) --- 493} 494 495/* 496 * Frees pages from a zone back to the system. This is done on demand from 497 * the pageout daemon. 498 * 499 * Arguments: 500 * zone The zone to free pages from |
501 * all Stale: zone_drain() takes no such argument; it frees only the slabs in excess of the zone's working-set size (uz_wssize). |
502 * 503 * Returns: 504 * Nothing. 505 */ 506static void 507zone_drain(uma_zone_t zone) 508{ 509 uma_slab_t slab; --- 16 unchanged lines hidden (view full) --- 526 cache_drain(zone); 527 528 if (zone->uz_free < zone->uz_wssize) 529 goto finished; 530#ifdef UMA_DEBUG 531 printf("%s working set size: %llu free items: %u\n", 532 zone->uz_name, (unsigned long long)zone->uz_wssize, zone->uz_free); 533#endif |
534 extra = zone->uz_free - zone->uz_wssize; |
535 extra /= zone->uz_ipers; 536 537 /* extra is now the number of extra slabs that we can free */ 538 539 if (extra == 0) 540 goto finished; 541 542 slab = LIST_FIRST(&zone->uz_free_slab); --- 471 unchanged lines hidden (view full) --- 1014 zone->uz_count = zone->uz_ipers - 1; 1015 else 1016 zone->uz_count = UMA_BUCKET_SIZE - 1; 1017 1018 for (cpu = 0; cpu < maxcpu; cpu++) 1019 CPU_LOCK_INIT(zone, cpu); 1020} 1021 |
1022/* 1023 * Zone header dtor. This frees all data, destroys locks, frees the hash table 1024 * and removes the zone from the global list. 1025 * 1026 * Arguments/Returns follow uma_dtor specifications 1027 * udata unused 1028 */ 1029 1030static void 1031zone_dtor(void *arg, int size, void *udata) 1032{ 1033 uma_zone_t zone; 1034 int cpu; 1035 1036 zone = (uma_zone_t)arg; 1037 1038 mtx_lock(&uma_mtx); 1039 LIST_REMOVE(zone, uz_link); 1040 mtx_unlock(&uma_mtx); 1041 1042 ZONE_LOCK(zone); 1043 zone->uz_wssize = 0; 1044 ZONE_UNLOCK(zone); 1045 1046 zone_drain(zone); 1047 ZONE_LOCK(zone); 1048 if (zone->uz_free != 0) 1049 printf("Zone %s was not empty. Lost %d pages of memory.\n", 1050 zone->uz_name, zone->uz_pages); 1051 1052 if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) != 0) 1053 for (cpu = 0; cpu < maxcpu; cpu++) 1054 CPU_LOCK_FINI(zone, cpu); 1055 1056 if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) != 0) 1057 hash_free(&zone->uz_hash); 1058 1059 ZONE_UNLOCK(zone); 1060 ZONE_LOCK_FINI(zone); 1061} |
1062/* 1063 * Traverses every zone in the system and calls a callback 1064 * 1065 * Arguments: 1066 * zfunc A pointer to a function which accepts a zone 1067 * as an argument. 1068 * 1069 * Returns: --- 34 unchanged lines hidden (view full) --- 1104 Debugger("stop"); 1105#endif 1106 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF); 1107 /* "manually" Create the initial zone */ 1108 args.name = "UMA Zones"; 1109 args.size = sizeof(struct uma_zone) + 1110 (sizeof(struct uma_cache) * (maxcpu - 1)); 1111 args.ctor = zone_ctor; |
1112 args.dtor = zone_dtor; |
1113 args.uminit = zero_init; 1114 args.fini = NULL; 1115 args.align = 32 - 1; 1116 args.flags = UMA_ZONE_INTERNAL; 1117 /* The initial zone has no Per cpu queues so it's smaller */ 1118 zone_ctor(zones, sizeof(struct uma_zone), &args); 1119 1120#ifdef UMA_DEBUG --- 92 unchanged lines hidden (view full) --- 1213 args.fini = fini; 1214 args.align = align; 1215 args.flags = flags; 1216 1217 return (uma_zalloc_internal(zones, &args, M_WAITOK, NULL)); 1218} 1219 1220/* See uma.h */ |
1221void 1222uma_zdestroy(uma_zone_t zone) 1223{ 1224 uma_zfree_internal(zones, zone, NULL, 0); 1225} 1226 1227/* See uma.h */ |
1228void * 1229uma_zalloc_arg(uma_zone_t zone, void *udata, int wait) 1230{ 1231 void *item; 1232 uma_cache_t cache; 1233 uma_bucket_t bucket; 1234 int cpu; 1235 --- 706 unchanged lines hidden --- |