uma_core.c (102241) vs. uma_core.c (103531)
1/*
2 * Copyright (c) 2002, Jeffrey Roberson <jroberson@chesapeake.net>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden ---

18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/vm/uma_core.c 102241 2002-08-21 23:39:52Z archie $
26 * $FreeBSD: head/sys/vm/uma_core.c 103531 2002-09-18 08:26:30Z jeff $
27 *
28 */
29
30/*
31 * uma_core.c Implementation of the Universal Memory allocator
32 *
33 * This allocator is intended to replace the multitude of similar object caches
34 * in the standard FreeBSD kernel. The intent is to be flexible as well as

--- 105 unchanged lines hidden ---

140 uma_ctor ctor;
141 uma_dtor dtor;
142 uma_init uminit;
143 uma_fini fini;
144 int align;
145 u_int16_t flags;
146};
147
148/*
149 * This is the malloc hash table which is used to find the zone that a
150 * malloc allocation came from. It is not currently resizeable. The
151 * memory for the actual hash bucket is allocated in kmeminit.
152 */
153struct uma_hash mhash;
154struct uma_hash *mallochash = &mhash;
155
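The table declared above is keyed by slab base address; its fields (uh_slab_hash, uh_hashsize, uh_hashmask) are visible in uma_startup2() further down this diff. As a rough userspace model of how such a lookup works, with every size and name here an assumption rather than the kernel's actual layout:

#include <stdint.h>
#include <stddef.h>

#define UMA_SLAB_SHIFT 12                /* assumption: 4K slabs */

struct slab {
	struct slab *us_hlink;           /* models the SLIST hash linkage */
	void *us_data;                   /* slab base address, the hash key */
};

struct uma_hash {
	struct slab **uh_slab_hash;      /* bucket heads */
	int uh_hashsize;                 /* always a power of two */
	int uh_hashmask;                 /* uh_hashsize - 1 */
};

/* Bucket index for a page-aligned address. */
static int
hash_idx(struct uma_hash *h, void *mem)
{
	return ((int)(((uintptr_t)mem >> UMA_SLAB_SHIFT) & h->uh_hashmask));
}

/* Walk one chain for the slab owning 'mem'; models hash_sfind(). */
static struct slab *
sfind(struct uma_hash *h, void *mem)
{
	struct slab *s;

	for (s = h->uh_slab_hash[hash_idx(h, mem)]; s != NULL; s = s->us_hlink)
		if (s->us_data == mem)
			return (s);
	return (NULL);
}

Because every key is a slab base address, shifting out the low UMA_SLAB_SHIFT bits before masking keeps the low buckets from going unused.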
156/* Prototypes.. */
157
158static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
159static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
160static void page_free(void *, int, u_int8_t);
161static uma_slab_t slab_zalloc(uma_zone_t, int);
162static void cache_drain(uma_zone_t);
163static void bucket_drain(uma_zone_t, uma_bucket_t);

--- 114 unchanged lines hidden ---

278 /*
279 * Expand the zone hash table.
280 *
281 * This is done if the number of slabs is larger than the hash size.
282 * What I'm trying to do here is completely reduce collisions. This
283 * may be a little aggressive. Should I allow for two collisions max?
284 */
285
148/* Prototypes.. */
149
150static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
151static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
152static void page_free(void *, int, u_int8_t);
153static uma_slab_t slab_zalloc(uma_zone_t, int);
154static void cache_drain(uma_zone_t);
155static void bucket_drain(uma_zone_t, uma_bucket_t);

--- 114 unchanged lines hidden ---

270 /*
271 * Expand the zone hash table.
272 *
273 * This is done if the number of slabs is larger than the hash size.
274 * What I'm trying to do here is completely reduce collisions. This
275 * may be a little aggressive. Should I allow for two collisions max?
276 */
277
286 if ((zone->uz_flags & UMA_ZFLAG_OFFPAGE) &&
287 !(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
288 if (zone->uz_pages / zone->uz_ppera
289 >= zone->uz_hash.uh_hashsize) {
290 struct uma_hash newhash;
291 struct uma_hash oldhash;
292 int ret;
278 if (zone->uz_flags & UMA_ZFLAG_HASH &&
279 zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) {
280 struct uma_hash newhash;
281 struct uma_hash oldhash;
282 int ret;
293
283
294 /*
295 * This is so involved because allocating and freeing
296 * while the zone lock is held will lead to deadlock.
297 * I have to do everything in stages and check for
298 * races.
299 */
300 newhash = zone->uz_hash;
284 /*
285 * This is so involved because allocating and freeing
286 * while the zone lock is held will lead to deadlock.
287 * I have to do everything in stages and check for
288 * races.
289 */
290 newhash = zone->uz_hash;
291 ZONE_UNLOCK(zone);
292 ret = hash_alloc(&newhash);
293 ZONE_LOCK(zone);
294 if (ret) {
295 if (hash_expand(&zone->uz_hash, &newhash)) {
296 oldhash = zone->uz_hash;
297 zone->uz_hash = newhash;
298 } else
299 oldhash = newhash;
300
301 ZONE_UNLOCK(zone);
302 ret = hash_alloc(&newhash);
302 hash_free(&oldhash);
303 ZONE_LOCK(zone);
304 if (ret) {
305 if (hash_expand(&zone->uz_hash, &newhash)) {
306 oldhash = zone->uz_hash;
307 zone->uz_hash = newhash;
308 } else
309 oldhash = newhash;
310
311 ZONE_UNLOCK(zone);
312 hash_free(&oldhash);
313 ZONE_LOCK(zone);
314 }
315 }
316 }
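The comment at lines 294-299 is the heart of this hunk: hash_alloc() may itself need memory from UMA, so calling it with the zone lock held could recurse into this very zone and deadlock. Below is a minimal sketch of the same staged pattern with a pthread mutex standing in for ZONE_LOCK; struct table and the helper functions are illustrative stand-ins, not the kernel API:

#include <pthread.h>
#include <stdlib.h>

struct table {
	void **buckets;
	int size;                        /* power of two */
};

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
static struct table hash = { NULL, 0 };  /* protected by zone_lock */
static int nslabs;                       /* ditto */

/* Size a replacement table from a snapshot of the old one. */
static int
hash_alloc_model(struct table *t)
{
	t->size = (t->size != 0) ? t->size * 2 : 16;
	t->buckets = calloc(t->size, sizeof(void *));
	return (t->buckets != NULL);
}

/* Adopt the new table only if it is still an improvement. */
static int
hash_expand_model(struct table *oldt, struct table *newt)
{
	if (newt->size <= oldt->size)
		return (0);              /* another thread already grew it */
	/* a real hash_expand() rehashes every entry from oldt into newt */
	return (1);
}

static void
zone_timeout_model(void)
{
	struct table newhash, oldhash;
	int ret;

	pthread_mutex_lock(&zone_lock);
	if (nslabs >= hash.size) {
		newhash = hash;                     /* snapshot under the lock */
		pthread_mutex_unlock(&zone_lock);
		ret = hash_alloc_model(&newhash);   /* allocate unlocked */
		pthread_mutex_lock(&zone_lock);
		if (ret) {
			if (hash_expand_model(&hash, &newhash)) {
				oldhash = hash;     /* won the race: swap tables */
				hash = newhash;
			} else
				oldhash = newhash;  /* lost it: discard ours */
			pthread_mutex_unlock(&zone_lock);
			free(oldhash.buckets);      /* free unlocked too */
			pthread_mutex_lock(&zone_lock);
		}
	}
	pthread_mutex_unlock(&zone_lock);
}

The re-check in hash_expand_model() is what the comment means by racing: between the unlock and the relock another thread may already have installed a bigger table.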
317
318 /*
319 * Here we compute the working set size as the total number of items
320 * left outstanding since the last time interval. This is slightly
321 * suboptimal. What we really want is the highest number of outstanding
322 * items during the last time quantum. This should be close enough.

--- 151 unchanged lines hidden ---

474#endif
475 bucket->ub_ptr--;
476 /*
477 * This is extremely inefficient. The slab pointer was passed
478 * to uma_zfree_arg, but we lost it because the buckets don't
479 * hold them. This will go away when free() gets a size passed
480 * to it.
481 */
304 }
305 }
306
307 /*
308 * Here we compute the working set size as the total number of items
309 * left outstanding since the last time interval. This is slightly
310 * suboptimal. What we really want is the highest number of outstanding
311 * items during the last time quantum. This should be close enough.
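The computation this comment describes lives in the hidden lines that follow; its effect can be modeled with two counters. Every name below is hypothetical, chosen only to illustrate the trade-off between the published estimate and the true high watermark:

/* Toy model of a zone's working-set estimate. */
struct zstat {
	long outstanding;                /* allocations minus frees */
	long peak;                       /* high watermark this interval */
	long wss;                        /* estimate published at timeout */
};

static void
on_alloc(struct zstat *z)
{
	if (++z->outstanding > z->peak)
		z->peak = z->outstanding;
}

static void
on_free(struct zstat *z)
{
	z->outstanding--;
}

static void
on_timeout(struct zstat *z)
{
	/*
	 * "Close enough": publish what is outstanding right now.
	 * The ideal estimate the comment asks for would be z->peak.
	 */
	z->wss = z->outstanding;
	z->peak = z->outstanding;        /* restart the watermark */
}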

--- 151 unchanged lines hidden ---

463#endif
464 bucket->ub_ptr--;
465 /*
466 * This is extremely inefficient. The slab pointer was passed
467 * to uma_zfree_arg, but we lost it because the buckets don't
468 * hold them. This will go away when free() gets a size passed
469 * to it.
470 */
482 if (mzone) {
483 mtx_lock(&malloc_mtx);
484 slab = hash_sfind(mallochash,
485 (u_int8_t *)((unsigned long)item &
486 (~UMA_SLAB_MASK)));
487 mtx_unlock(&malloc_mtx);
488 }
471 if (mzone)
472 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
489 uma_zfree_internal(zone, item, slab, 1);
490 }
491}
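This hunk is the recurring theme of the whole revision: the old free path took the global malloc_mtx and walked mallochash to map an item back to its slab, while the new one reads a slab pointer that was stashed in the page's vm_page when the slab was created (the vsetslab()/vtoslab() pair). A toy model with an array standing in for the vm_page field; the sizes and suffixed names are assumptions:

#include <stdint.h>

#define PAGE_SHIFT 12
#define UMA_SLAB_MASK ((1UL << PAGE_SHIFT) - 1)
#define NPAGES 1024                      /* toy 4 MB address range */

struct slab;                             /* opaque stand-in for uma_slab_t */

/* Models the per-page backpointer kept in struct vm_page. */
static struct slab *page_slab[NPAGES];

static void
vsetslab_model(uintptr_t va, struct slab *s)
{
	page_slab[(va >> PAGE_SHIFT) % NPAGES] = s;
}

static struct slab *
vtoslab_model(uintptr_t va)
{
	return (page_slab[(va >> PAGE_SHIFT) % NPAGES]);
}

/* The new free path: mask to the slab base, one read, no mutex. */
static struct slab *
item_slab(void *item)
{
	return (vtoslab_model((uintptr_t)item & ~UMA_SLAB_MASK));
}

That turns a global lock acquisition plus a chain walk on every bucket-overflow free into a constant-time read, which is why the mallochash machinery above could be deleted.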
492
493/*
494 * Drains the per cpu caches for a zone.
495 *
496 * Arguments:

--- 120 unchanged lines hidden ---

617 slab = n;
618 continue;
619 }
620
621 LIST_REMOVE(slab, us_link);
622 zone->uz_pages -= zone->uz_ppera;
623 zone->uz_free -= zone->uz_ipers;
624
473 uma_zfree_internal(zone, item, slab, 1);
474 }
475}
476
477/*
478 * Drains the per cpu caches for a zone.
479 *
480 * Arguments:

--- 120 unchanged lines hidden ---

601 slab = n;
602 continue;
603 }
604
605 LIST_REMOVE(slab, us_link);
606 zone->uz_pages -= zone->uz_ppera;
607 zone->uz_free -= zone->uz_ipers;
608
625 if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
626 mtx_lock(&malloc_mtx);
627 UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
628 mtx_unlock(&malloc_mtx);
629 }
630 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE &&
631 !(zone->uz_flags & UMA_ZFLAG_MALLOC))
609 if (zone->uz_flags & UMA_ZFLAG_HASH)
632 UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data);
633
634 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
635
636 slab = n;
637 extra--;
638 }
639finished:
640 ZONE_UNLOCK(zone);
641
642 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
643 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
644 if (zone->uz_fini)
645 for (i = 0; i < zone->uz_ipers; i++)
646 zone->uz_fini(
647 slab->us_data + (zone->uz_rsize * i),
648 zone->uz_size);
649 flags = slab->us_flags;
650 mem = slab->us_data;
610 UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data);
611
612 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
613
614 slab = n;
615 extra--;
616 }
617finished:
618 ZONE_UNLOCK(zone);
619
620 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
621 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
622 if (zone->uz_fini)
623 for (i = 0; i < zone->uz_ipers; i++)
624 zone->uz_fini(
625 slab->us_data + (zone->uz_rsize * i),
626 zone->uz_size);
627 flags = slab->us_flags;
628 mem = slab->us_data;
651 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE) {
652 uma_zfree_internal(slabzone, slab, NULL, 0);
653 }
629
630 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
631 uma_zfree_internal(slabzone, slab, NULL, 0);
632 if (zone->uz_flags & UMA_ZFLAG_MALLOC)
633 for (i = 0; i < zone->uz_ppera; i++)
634 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
635 kmem_object);
654#ifdef UMA_DEBUG
655 printf("%s: Returning %d bytes.\n",
656 zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
657#endif
658 zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
659 }
660
661}
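zone_drain() shows the same lock discipline as the hash resize: slabs are unlinked and the counters adjusted under the zone lock, but collected onto a local freeslabs list so the page release (and any fini callbacks) runs after ZONE_UNLOCK. A compact model of that two-phase shape; the list layout and free function here are stand-ins:

#include <pthread.h>
#include <stdlib.h>

struct slab {
	struct slab *next;
	void *mem;
};

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
static struct slab *free_slabs;          /* zone's free-slab list, locked */
static int zone_pages;                   /* ditto */

static void
zone_drain_model(int keep)
{
	struct slab *batch = NULL, *s;

	/* Phase 1: unlink under the lock, touch no allocator. */
	pthread_mutex_lock(&zone_lock);
	while (free_slabs != NULL && zone_pages > keep) {
		s = free_slabs;
		free_slabs = s->next;
		zone_pages--;
		s->next = batch;         /* models SLIST_INSERT_HEAD */
		batch = s;
	}
	pthread_mutex_unlock(&zone_lock);

	/* Phase 2: give the memory back with no lock held. */
	while ((s = batch) != NULL) {
		batch = s->next;
		free(s->mem);            /* models zone->uz_freef() */
		free(s);
	}
}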

--- 65 unchanged lines hidden ---

727 panic("UMA: Ran out of pre init pages, increase UMA_BOOT_PAGES\n");
728 tmps = LIST_FIRST(&uma_boot_pages);
729 LIST_REMOVE(tmps, us_link);
730 uma_boot_free--;
731 mem = tmps->us_data;
732 }
733
734 /* Point the slab into the allocated memory */
636#ifdef UMA_DEBUG
637 printf("%s: Returning %d bytes.\n",
638 zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
639#endif
640 zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
641 }
642
643}

--- 65 unchanged lines hidden ---

709 panic("UMA: Ran out of pre init pages, increase UMA_BOOT_PAGES\n");
710 tmps = LIST_FIRST(&uma_boot_pages);
711 LIST_REMOVE(tmps, us_link);
712 uma_boot_free--;
713 mem = tmps->us_data;
714 }
715
716 /* Point the slab into the allocated memory */
735 if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE)) {
736 slab = (uma_slab_t )(mem + zone->uz_pgoff);
737 }
738
717 if (!(zone->uz_flags & UMA_ZFLAG_OFFPAGE))
718 slab = (uma_slab_t )(mem + zone->uz_pgoff);
719
739 if (zone->uz_flags & UMA_ZFLAG_MALLOC) {
740#ifdef UMA_DEBUG
741 printf("Inserting %p into malloc hash from slab %p\n",
742 mem, slab);
743#endif
744 mtx_lock(&malloc_mtx);
745 UMA_HASH_INSERT(mallochash, slab, mem);
746 mtx_unlock(&malloc_mtx);
747 }
720 if (zone->uz_flags & UMA_ZFLAG_MALLOC)
721 for (i = 0; i < zone->uz_ppera; i++)
722 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
748
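One subtlety in the new MALLOC-zone path at lines 720-722: a slab can span several pages (uz_ppera > 1) and a freed item can live in any of them, so every constituent page gets the backpointer. Reusing the vsetslab() model from the earlier sketch (the prototype below belongs to that model, not to the kernel):

#include <stdint.h>

#define PAGE_SIZE 4096                   /* assumption */

struct slab;
void vsetslab_model(uintptr_t va, struct slab *s);

/* Tag each page so any interior address resolves back to the slab. */
static void
tag_slab_pages(struct slab *s, uintptr_t mem, int ppera)
{
	int i;

	for (i = 0; i < ppera; i++)
		vsetslab_model(mem + (uintptr_t)i * PAGE_SIZE, s);
}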
749 slab->us_zone = zone;
750 slab->us_data = mem;
751
752 /*
753 * This is intended to spread data out across cache lines.
754 *
755 * This code doesn't seem to work properly on x86, and on alpha

--- 17 unchanged lines hidden ---

773 slab->us_freelist[i] = i+1;
774
775 if (zone->uz_init)
776 for (i = 0; i < zone->uz_ipers; i++)
777 zone->uz_init(slab->us_data + (zone->uz_rsize * i),
778 zone->uz_size);
779 ZONE_LOCK(zone);
780
723
724 slab->us_zone = zone;
725 slab->us_data = mem;
726
727 /*
728 * This is intended to spread data out across cache lines.
729 *
730 * This code doesn't seem to work properly on x86, and on alpha

--- 17 unchanged lines hidden ---

748 slab->us_freelist[i] = i+1;
749
750 if (zone->uz_init)
751 for (i = 0; i < zone->uz_ipers; i++)
752 zone->uz_init(slab->us_data + (zone->uz_rsize * i),
753 zone->uz_size);
754 ZONE_LOCK(zone);
755
781 if ((zone->uz_flags & (UMA_ZFLAG_OFFPAGE|UMA_ZFLAG_MALLOC)) ==
782 UMA_ZFLAG_OFFPAGE)
756 if (zone->uz_flags & UMA_ZFLAG_HASH)
783 UMA_HASH_INSERT(&zone->uz_hash, slab, mem);
784
785 zone->uz_pages += zone->uz_ppera;
786 zone->uz_free += zone->uz_ipers;
787
788
789 return (slab);
790}

--- 140 unchanged lines hidden ---

931
932 /* Can we do any better? */
933 if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
934 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
935 return;
936 ipers = UMA_SLAB_SIZE / zone->uz_rsize;
937 if (ipers > zone->uz_ipers) {
938 zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
757 UMA_HASH_INSERT(&zone->uz_hash, slab, mem);
758
759 zone->uz_pages += zone->uz_ppera;
760 zone->uz_free += zone->uz_ipers;
761
762
763 return (slab);
764}

--- 140 unchanged lines hidden ---

905
906 /* Can we do any better? */
907 if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
908 if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
909 return;
910 ipers = UMA_SLAB_SIZE / zone->uz_rsize;
911 if (ipers > zone->uz_ipers) {
912 zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
913 if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
914 zone->uz_flags |= UMA_ZFLAG_HASH;
939 zone->uz_ipers = ipers;
940 }
941 }
942
943}
944
945/*
946 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do

--- 16 unchanged lines hidden ---

963 /* Account for remainder */
964 if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
965 pages++;
966
967 zone->uz_ppera = pages;
968 zone->uz_ipers = 1;
969
970 zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
915 zone->uz_ipers = ipers;
916 }
917 }
918
919}
920
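The arithmetic behind this decision is easy to miss in diff form. Keeping the header inline costs header space; if the resulting slack is at least UMA_MAX_WASTE and a headerless layout fits more items, the zone goes OFFPAGE (and, in the new revision, also HASH, unless it is a MALLOC zone that can use page backpointers instead). A worked sketch with assumed constants; the real memused also counts per-item freelist bytes, which this folds into SLAB_HDR:

#include <stdio.h>

#define UMA_SLAB_SIZE 4096
#define UMA_MAX_WASTE (UMA_SLAB_SIZE / 10)   /* assumption */
#define SLAB_HDR 64                          /* assumption: inline header */

/* Decide whether an off-page slab header is worth it for an item size. */
static int
should_go_offpage(int rsize)
{
	int ipers_inline = (UMA_SLAB_SIZE - SLAB_HDR) / rsize;
	int memused = ipers_inline * rsize + SLAB_HDR;
	int ipers_offpage = UMA_SLAB_SIZE / rsize;

	/* Only bother when waste is high and off-page fits more items. */
	return (UMA_SLAB_SIZE - memused >= UMA_MAX_WASTE &&
	    ipers_offpage > ipers_inline);
}

int
main(void)
{
	/* 1024-byte items: 3 fit inline (960 bytes wasted), 4 off-page. */
	printf("rsize 1024 -> offpage=%d\n", should_go_offpage(1024));
	return (0);
}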
921/*
922 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do

--- 16 unchanged lines hidden (view full) ---

939 /* Account for remainder */
940 if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
941 pages++;
942
943 zone->uz_ppera = pages;
944 zone->uz_ipers = 1;
945
946 zone->uz_flags |= UMA_ZFLAG_OFFPAGE;
947 if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
948 zone->uz_flags |= UMA_ZFLAG_HASH;
949
971 zone->uz_rsize = zone->uz_size;
972}
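zone_large_init() reduces to a ceiling division plus fixed settings: one item per slab, header always off-page. In outline, with UMA_SLAB_SIZE assumed equal to the page size:

#define UMA_SLAB_SIZE 4096               /* assumption: one page */

/* Pages needed for one large item: ceil(size / UMA_SLAB_SIZE). */
static int
large_zone_pages(int size)
{
	int pages = size / UMA_SLAB_SIZE;

	if (pages * UMA_SLAB_SIZE < size)    /* account for remainder */
		pages++;
	return (pages);
}

So a 9000-byte item gets ppera = 3 and ipers = 1; since the slab is entirely item, the header must live off-page, and the new revision additionally marks non-MALLOC large zones HASH so the header can be found again at free time.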
973
974/*
975 * Zone header ctor. This initializes all fields, locks, etc. And inserts
976 * the zone onto the global zone list.
977 *
978 * Arguments/Returns follow uma_ctor specifications

--- 89 unchanged lines hidden ---

1068 + zone->uz_ipers;
1069 /* I don't think it's possible, but I'll make sure anyway */
1070 if (totsize > UMA_SLAB_SIZE) {
1071 printf("zone %s ipers %d rsize %d size %d\n",
1072 zone->uz_name, zone->uz_ipers, zone->uz_rsize,
1073 zone->uz_size);
1074 panic("UMA slab won't fit.\n");
1075 }
950 zone->uz_rsize = zone->uz_size;
951}
952
953/*
954 * Zone header ctor. This initializes all fields, locks, etc. And inserts
955 * the zone onto the global zone list.
956 *
957 * Arguments/Returns follow uma_ctor specifications

--- 89 unchanged lines hidden ---

1047 + zone->uz_ipers;
1048 /* I don't think it's possible, but I'll make sure anyway */
1049 if (totsize > UMA_SLAB_SIZE) {
1050 printf("zone %s ipers %d rsize %d size %d\n",
1051 zone->uz_name, zone->uz_ipers, zone->uz_rsize,
1052 zone->uz_size);
1053 panic("UMA slab won't fit.\n");
1054 }
1076 } else {
1077 hash_alloc(&zone->uz_hash);
1078 zone->uz_pgoff = 0;
1079 }
1080
1055 }
1056
1057 if (zone->uz_flags & UMA_ZFLAG_HASH)
1058 hash_alloc(&zone->uz_hash);
1059
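The totsize check just above is the ctor double-checking zone_small_init's packing: the items, the inline header, and one u_int8_t freelist index per item (the us_freelist[i] = i+1 loop earlier in the diff) must all share one slab. The full expression sits in the hidden lines; this sketch only shows its shape, with the header size assumed:

#include <assert.h>

#define UMA_SLAB_SIZE 4096
#define SLAB_HDR 64                      /* assumption: inline header size */

/* Everything sharing the slab has to fit; the kernel panics otherwise. */
static void
check_slab_fit(int rsize, int ipers)
{
	int totsize = rsize * ipers + SLAB_HDR + ipers; /* + ipers: freelist */

	assert(totsize <= UMA_SLAB_SIZE);
}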
1081#ifdef UMA_DEBUG
1082 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1083 zone->uz_name, zone,
1084 zone->uz_size, zone->uz_ipers,
1085 zone->uz_ppera, zone->uz_pgoff);
1086#endif
1087 ZONE_LOCK_INIT(zone, privlc);
1088

--- 159 unchanged lines hidden (view full) ---

1248
1249#ifdef UMA_DEBUG
1250 printf("UMA startup complete.\n");
1251#endif
1252}
1253
1254/* see uma.h */
1255void
1060#ifdef UMA_DEBUG
1061 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
1062 zone->uz_name, zone,
1063 zone->uz_size, zone->uz_ipers,
1064 zone->uz_ppera, zone->uz_pgoff);
1065#endif
1066 ZONE_LOCK_INIT(zone, privlc);
1067

--- 159 unchanged lines hidden (view full) ---

1227
1228#ifdef UMA_DEBUG
1229 printf("UMA startup complete.\n");
1230#endif
1231}
1232
1233/* see uma.h */
1234void
1256uma_startup2(void *hashmem, u_long elems)
1257{
1258 bzero(hashmem, elems * sizeof(void *));
1259 mallochash->uh_slab_hash = hashmem;
1260 mallochash->uh_hashsize = elems;
1261 mallochash->uh_hashmask = elems - 1;
1262 booted = 1;
1263 bucket_enable();
1264#ifdef UMA_DEBUG
1265 printf("UMA startup2 complete.\n");
1266#endif
1267}
1235uma_startup2(void)
1236{
1268
1269/*

--- 528 unchanged lines hidden ---

1798 uma_slab_t slab;
1799 u_int8_t *mem;
1800 u_int8_t freei;
1801
1802 ZONE_LOCK(zone);
1803
1804 if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
1805 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
1237 booted = 1;
1238 bucket_enable();
1239#ifdef UMA_DEBUG
1240 printf("UMA startup2 complete.\n");
1241#endif
1242}
1243
1244/*

--- 528 unchanged lines hidden ---

1773 uma_slab_t slab;
1774 u_int8_t *mem;
1775 u_int8_t freei;
1776
1777 ZONE_LOCK(zone);
1778
1779 if (!(zone->uz_flags & UMA_ZFLAG_MALLOC)) {
1780 mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
1806 if (zone->uz_flags & UMA_ZFLAG_OFFPAGE)
1781 if (zone->uz_flags & UMA_ZFLAG_HASH)
1807 slab = hash_sfind(&zone->uz_hash, mem);
1808 else {
1809 mem += zone->uz_pgoff;
1810 slab = (uma_slab_t)mem;
1811 }
1812 } else {
1813 slab = (uma_slab_t)udata;
1814 }
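uma_zfree_internal()'s lookup is a three-way switch, and the flag tested changes meaning in this revision: OFFPAGE only says where the header lives, while the new HASH says the zone hash is how to find it (MALLOC zones are off-page too, but resolve through vtoslab() instead). The shape of the non-MALLOC cases, as a hedged sketch:

#include <stdint.h>

#define UMA_SLAB_MASK 4095               /* assumption: 4K slabs */

struct uma_slab;                         /* opaque here */

static struct uma_slab *
item_to_slab(void *item, int use_hash, unsigned pgoff,
    struct uma_slab *(*hash_lookup)(uint8_t *))
{
	uint8_t *mem;

	/* Round the item down to its slab's base address. */
	mem = (uint8_t *)((uintptr_t)item & ~(uintptr_t)UMA_SLAB_MASK);
	if (use_hash)
		return (hash_lookup(mem)); /* off-page header: ask the hash */
	return ((struct uma_slab *)(mem + pgoff)); /* inline header */
}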

--- 181 unchanged lines hidden ---

1996 u_int8_t flags;
1997
1998 slab = uma_zalloc_internal(slabzone, NULL, wait, NULL);
1999 if (slab == NULL)
2000 return (NULL);
2001
2002 mem = page_alloc(NULL, size, &flags, wait);
2003 if (mem) {
1782 slab = hash_sfind(&zone->uz_hash, mem);
1783 else {
1784 mem += zone->uz_pgoff;
1785 slab = (uma_slab_t)mem;
1786 }
1787 } else {
1788 slab = (uma_slab_t)udata;
1789 }

--- 181 unchanged lines hidden ---

1971 u_int8_t flags;
1972
1973 slab = uma_zalloc_internal(slabzone, NULL, wait, NULL);
1974 if (slab == NULL)
1975 return (NULL);
1976
1977 mem = page_alloc(NULL, size, &flags, wait);
1978 if (mem) {
2004 slab->us_data = mem;
2005 slab->us_flags = flags | UMA_SLAB_MALLOC;
2006 slab->us_size = size;
1979 vsetslab((vm_offset_t)mem, slab);
1980 slab->us_data = mem;
1981 slab->us_flags = flags | UMA_SLAB_MALLOC;
1982 slab->us_size = size;
2007 mtx_lock(&malloc_mtx);
2008 UMA_HASH_INSERT(mallochash, slab, mem);
2009 mtx_unlock(&malloc_mtx);
2010 } else {
2011 uma_zfree_internal(slabzone, slab, NULL, 0);
2012 }
2013
2014
2015 return (mem);
2016}
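uma_large_malloc() keeps a slab header for bookkeeping even though the memory is a one-off: the recorded us_size is what lets uma_large_free() hand the pages back without being told a size. A userspace caricature, with malloc()/free() standing in for slabzone and page_alloc():

#include <stdlib.h>

struct big_slab {
	void *us_data;                   /* the pages given to the caller */
	size_t us_size;                  /* remembered for the free side */
};

static struct big_slab *
large_malloc_model(size_t size)
{
	struct big_slab *slab;

	slab = malloc(sizeof(*slab));    /* models uma_zalloc_internal(slabzone) */
	if (slab == NULL)
		return (NULL);
	slab->us_data = malloc(size);    /* models page_alloc() */
	if (slab->us_data == NULL) {
		free(slab);              /* models uma_zfree_internal(slabzone) */
		return (NULL);
	}
	slab->us_size = size;
	return (slab);
}

static void
large_free_model(struct big_slab *slab)
{
	free(slab->us_data);             /* models page_free(.., us_size, ..) */
	free(slab);
}

The revision's change is in how the header is found again: the old code hashed us_data into mallochash under malloc_mtx, while the new code already tagged the pages with vsetslab(), so both the hash insert here and the remove in uma_large_free() disappear.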
2017
2018void
2019uma_large_free(uma_slab_t slab)
2020{
1983 } else {
1984 uma_zfree_internal(slabzone, slab, NULL, 0);
1985 }
1986
1987
1988 return (mem);
1989}
1990
1991void
1992uma_large_free(uma_slab_t slab)
1993{
2021 mtx_lock(&malloc_mtx);
2022 UMA_HASH_REMOVE(mallochash, slab, slab->us_data);
2023 mtx_unlock(&malloc_mtx);
1994 vsetobj((vm_offset_t)slab->us_data, kmem_object);
2024 page_free(slab->us_data, slab->us_size, slab->us_flags);
2025 uma_zfree_internal(slabzone, slab, NULL, 0);
2026}
2027
2028void
2029uma_print_stats(void)
2030{
2031 zone_foreach(uma_print_zone);

--- 68 unchanged lines hidden ---
1995 page_free(slab->us_data, slab->us_size, slab->us_flags);
1996 uma_zfree_internal(slabzone, slab, NULL, 0);
1997}
1998
1999void
2000uma_print_stats(void)
2001{
2002 zone_foreach(uma_print_zone);

--- 68 unchanged lines hidden ---