1/* 2 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright --- 9 unchanged lines hidden (view full) --- 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * |
26 * $FreeBSD: head/sys/vm/uma_core.c 103531 2002-09-18 08:26:30Z jeff $ |
27 * 28 */ 29 30/* 31 * uma_core.c Implementation of the Universal Memory allocator 32 * 33 * This allocator is intended to replace the multitude of similar object caches 34 * in the standard FreeBSD kernel. The intent is to be flexible as well as --- 105 unchanged lines hidden (view full) --- 140 uma_ctor ctor; 141 uma_dtor dtor; 142 uma_init uminit; 143 uma_fini fini; 144 int align; 145 u_int16_t flags; 146}; 147 |
148/* Prototypes.. */ 149 150static void *obj_alloc(uma_zone_t, int, u_int8_t *, int); 151static void *page_alloc(uma_zone_t, int, u_int8_t *, int); 152static void page_free(void *, int, u_int8_t); 153static uma_slab_t slab_zalloc(uma_zone_t, int); 154static void cache_drain(uma_zone_t); 155static void bucket_drain(uma_zone_t, uma_bucket_t); --- 114 unchanged lines hidden (view full) --- 270 /* 271 * Expand the zone hash table. 272 * 273 * This is done if the number of slabs is larger than the hash size. 274 * What I'm trying to do here is completely reduce collisions. This 275 * may be a little aggressive. Should I allow for two collisions max? 276 */ 277 |
/*
 * NOTE(review): fragment of zone_timeout() — the enclosing function is not
 * fully visible here.  Grow the slab hash once there is (on average) one
 * slab per hash bucket, for hash-backed zones only.
 */
278	if (zone->uz_flags & UMA_ZFLAG_HASH && 279	    zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) { 280		struct uma_hash newhash; 281		struct uma_hash oldhash; 282		int ret; |
283 |
284		/* 285		 * This is so involved because allocating and freeing 286		 * while the zone lock is held will lead to deadlock. 287		 * I have to do everything in stages and check for 288		 * races. 289		 */ 290		newhash = zone->uz_hash; 291		ZONE_UNLOCK(zone); 292		ret = hash_alloc(&newhash); 293		ZONE_LOCK(zone); 294		if (ret) { 295			if (hash_expand(&zone->uz_hash, &newhash)) { 296				oldhash = zone->uz_hash; 297				zone->uz_hash = newhash; 298			} else 299				oldhash = newhash; 300 |
/*
 * Like hash_alloc() above, hash_free() releases memory and so (per the
 * comment above) cannot run with the zone lock held; drop the lock around
 * it.  On a lost race (hash_expand() failure) the hash just allocated is
 * the one freed.
 */
301			ZONE_UNLOCK(zone); |
302			hash_free(&oldhash); |
303			ZONE_LOCK(zone); |
304 } 305 } 306 307 /* 308 * Here we compute the working set size as the total number of items 309 * left outstanding since the last time interval. This is slightly 310 * suboptimal. What we really want is the highest number of outstanding 311 * items during the last time quantum. This should be close enough. --- 151 unchanged lines hidden (view full) --- 463#endif 464 bucket->ub_ptr--; 465 /* 466 * This is extremely inefficient. The slab pointer was passed 467 * to uma_zfree_arg, but we lost it because the buckets don't 468 * hold them. This will go away when free() gets a size passed 469 * to it. 470 */ |
/*
 * NOTE(review): `mzone` is presumably set for UMA_ZFLAG_MALLOC zones —
 * confirm against the (hidden) head of this function.  The slab header is
 * recovered from the vm page backing the item (address masked down to the
 * slab boundary) because buckets do not record slab pointers; see the
 * inefficiency comment directly above.
 */
471		if (mzone) 472			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK)); |
473 uma_zfree_internal(zone, item, slab, 1); 474 } 475} 476 477/* 478 * Drains the per cpu caches for a zone. 479 * 480 * Arguments: --- 120 unchanged lines hidden (view full) --- 601 slab = n; 602 continue; 603 } 604 605 LIST_REMOVE(slab, us_link); 606 zone->uz_pages -= zone->uz_ppera; 607 zone->uz_free -= zone->uz_ipers; 608 |
/*
 * Hash-backed zones must also have the slab removed from the page-address
 * hash (UMA_HASH_REMOVE follows) before the slab is torn down, or later
 * lookups would find a stale header.
 */
609		if (zone->uz_flags & UMA_ZFLAG_HASH) |
610 UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data); 611 612 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); 613 614 slab = n; 615 extra--; 616 } 617finished: 618 ZONE_UNLOCK(zone); 619 620 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { 621 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); 622 if (zone->uz_fini) 623 for (i = 0; i < zone->uz_ipers; i++) 624 zone->uz_fini( 625 slab->us_data + (zone->uz_rsize * i), 626 zone->uz_size); 627 flags = slab->us_flags; 628 mem = slab->us_data; |
/*
 * Off-page slab headers were allocated from slabzone, so they are returned
 * there; the header must not be referenced after this call.
 */
629 630		if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) |
631			uma_zfree_internal(slabzone, slab, NULL, 0); |
/*
 * Malloc-backed zones tagged each backing page with the slab pointer at
 * allocation time (see the vsetslab() loop in slab_zalloc()); point every
 * page back at kmem_object before the pages are handed to uz_freef.
 * NOTE(review): confirm vsetobj() semantics against the vm_page code.
 */
632		if (zone->uz_flags & UMA_ZFLAG_MALLOC) 633			for (i = 0; i < zone->uz_ppera; i++) 634				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), 635				    kmem_object); |
636#ifdef UMA_DEBUG 637 printf("%s: Returning %d bytes.\n", 638 zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera); 639#endif 640 zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags); 641 } 642 643} --- 65 unchanged lines hidden (view full) --- 709 panic("UMA: Ran out of pre init pages, increase UMA_BOOT_PAGES\n"); 710 tmps = LIST_FIRST(&uma_boot_pages); 711 LIST_REMOVE(tmps, us_link); 712 uma_boot_free--; 713 mem = tmps->us_data; 714 } 715 716 /* Point the slab into the allocated memory */ |
/*
 * In-line slabs keep their header at uz_pgoff inside the slab memory
 * itself.  NOTE(review): for OFFPAGE zones `slab` must already have been
 * allocated earlier in this function (not visible in this fragment) —
 * confirm.
 */
717	if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) |
718		slab = (uma_slab_t )(mem + zone->uz_pgoff); |
719 |
/*
 * Malloc-backed zones record the slab pointer in every backing vm page so
 * the free path can recover the header from just an address (see the
 * vtoslab() use in bucket_drain()).
 */
720	if (zone->uz_flags & UMA_ZFLAG_MALLOC) 721		for (i = 0; i < zone->uz_ppera; i++) 722			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); |
723 724 slab->us_zone = zone; 725 slab->us_data = mem; 726 727 /* 728 * This is intended to spread data out across cache lines. 729 * 730 * This code doesn't seem to work properly on x86, and on alpha --- 17 unchanged lines hidden (view full) --- 748 slab->us_freelist[i] = i+1; 749 750 if (zone->uz_init) 751 for (i = 0; i < zone->uz_ipers; i++) 752 zone->uz_init(slab->us_data + (zone->uz_rsize * i), 753 zone->uz_size); 754 ZONE_LOCK(zone); 755 |
/*
 * Counterpart of the UMA_HASH_REMOVE in zone_drain(): newly built slabs of
 * hash-backed zones become findable by page address (UMA_HASH_INSERT
 * follows).  Done under the zone lock reacquired just above.
 */
756	if (zone->uz_flags & UMA_ZFLAG_HASH) |
757 UMA_HASH_INSERT(&zone->uz_hash, slab, mem); 758 759 zone->uz_pages += zone->uz_ppera; 760 zone->uz_free += zone->uz_ipers; 761 762 763 return (slab); 764} --- 140 unchanged lines hidden (view full) --- 905 906 /* Can we do any better? */ 907 if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) { 908 if (zone->uz_flags & UMA_ZFLAG_INTERNAL) 909 return; 910 ipers = UMA_SLAB_SIZE / zone->uz_rsize; 911 if (ipers > zone->uz_ipers) { 912 zone->uz_flags |= UMA_ZFLAG_OFFPAGE; |
/*
 * Off-page slabs normally need the address hash to locate headers on free;
 * malloc-backed zones skip it because they store the slab pointer in the
 * vm page itself (vsetslab()/vtoslab() elsewhere in this file).
 */
913			if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0) 914				zone->uz_flags |= UMA_ZFLAG_HASH; |
915 zone->uz_ipers = ipers; 916 } 917 } 918 919} 920 921/* 922 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do --- 16 unchanged lines hidden (view full) --- 939 /* Account for remainder */ 940 if ((pages * UMA_SLAB_SIZE) < zone->uz_size) 941 pages++; 942 943 zone->uz_ppera = pages; 944 zone->uz_ipers = 1; 945 946 zone->uz_flags |= UMA_ZFLAG_OFFPAGE; |
/*
 * Large zones always go off-page, so non-malloc zones additionally need
 * the page-address hash for header lookup; malloc zones find the header
 * via the per-page slab pointer instead.
 */
947	if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0) 948		zone->uz_flags |= UMA_ZFLAG_HASH; 949 |
950 zone->uz_rsize = zone->uz_size; 951} 952 953/* 954 * Zone header ctor. This initializes all fields, locks, etc. And inserts 955 * the zone onto the global zone list. 956 * 957 * Arguments/Returns follow uma_ctor specifications --- 89 unchanged lines hidden (view full) --- 1047 + zone->uz_ipers; 1048 /* I don't think it's possible, but I'll make sure anyway */ 1049 if (totsize > UMA_SLAB_SIZE) { 1050 printf("zone %s ipers %d rsize %d size %d\n", 1051 zone->uz_name, zone->uz_ipers, zone->uz_rsize, 1052 zone->uz_size); 1053 panic("UMA slab won't fit.\n"); 1054 } |
1055 } 1056 |
/*
 * Allocate the zone's initial slab hash.  NOTE(review): the return value of
 * hash_alloc() is ignored here, whereas the grow path in zone_timeout()
 * checks it — presumably lookups degrade safely when the initial
 * allocation fails; confirm.
 */
1057	if (zone->uz_flags & UMA_ZFLAG_HASH) 1058		hash_alloc(&zone->uz_hash); 1059 |
1060#ifdef UMA_DEBUG 1061 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n", 1062 zone->uz_name, zone, 1063 zone->uz_size, zone->uz_ipers, 1064 zone->uz_ppera, zone->uz_pgoff); 1065#endif 1066 ZONE_LOCK_INIT(zone, privlc); 1067 --- 159 unchanged lines hidden (view full) --- 1227 1228#ifdef UMA_DEBUG 1229 printf("UMA startup complete.\n"); 1230#endif 1231} 1232 1233/* see uma.h */ 1234void |
/*
 * Second stage of allocator bring-up (contract documented in uma.h).  The
 * body sets `booted` and calls bucket_enable(); NOTE(review): presumably
 * this re-enables the per-cpu buckets that were held off during early boot
 * until the VM could back them — confirm against bucket_enable().
 */
1235uma_startup2(void) |
1236{ |
1237 booted = 1; 1238 bucket_enable(); 1239#ifdef UMA_DEBUG 1240 printf("UMA startup2 complete.\n"); 1241#endif 1242} 1243 1244/* --- 528 unchanged lines hidden (view full) --- 1773 uma_slab_t slab; 1774 u_int8_t *mem; 1775 u_int8_t freei; 1776 1777 ZONE_LOCK(zone); 1778 1779 if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) { 1780 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK)); |
/*
 * Locate the slab header for a non-malloc zone: hash-backed zones hash the
 * slab base address (hash_sfind below); otherwise the header lives at
 * uz_pgoff within the slab memory itself (else branch below).
 */
1781		if (zone->uz_flags & UMA_ZFLAG_HASH) |
1782 slab = hash_sfind(&zone->uz_hash, mem); 1783 else { 1784 mem += zone->uz_pgoff; 1785 slab = (uma_slab_t)mem; 1786 } 1787 } else { 1788 slab = (uma_slab_t)udata; 1789 } --- 181 unchanged lines hidden (view full) --- 1971 u_int8_t flags; 1972 1973 slab = uma_zalloc_internal(slabzone, NULL, wait, NULL); 1974 if (slab == NULL) 1975 return (NULL); 1976 1977 mem = page_alloc(NULL, size, &flags, wait); 1978 if (mem) { |
/*
 * Tag the first backing page with the slab header so the free path can
 * recover it from the address alone, then fill in the header.
 * NOTE(review): only page 0 is tagged, unlike slab_zalloc() which tags
 * every page of a malloc slab — presumably large frees always present the
 * base address; confirm.
 */
1979		vsetslab((vm_offset_t)mem, slab); |
1980		slab->us_data = mem; 1981		slab->us_flags = flags | UMA_SLAB_MALLOC; 1982		slab->us_size = size; |
1983 } else { 1984 uma_zfree_internal(slabzone, slab, NULL, 0); 1985 } 1986 1987 1988 return (mem); 1989} 1990 1991void 1992uma_large_free(uma_slab_t slab) 1993{ |
/*
 * Point the page back at kmem_object — undoing the vsetslab() done in
 * uma_large_malloc() — before the memory is released below.  NOTE(review):
 * only the first page is reassigned, consistent with uma_large_malloc()
 * tagging only the first page.
 */
1994	vsetobj((vm_offset_t)slab->us_data, kmem_object); |
1995 page_free(slab->us_data, slab->us_size, slab->us_flags); 1996 uma_zfree_internal(slabzone, slab, NULL, 0); 1997} 1998 1999void 2000uma_print_stats(void) 2001{ 2002 zone_foreach(uma_print_zone); --- 68 unchanged lines hidden --- |