/*-
 * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 32 unchanged lines hidden ---

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/uma_core.c 187681 2009-01-25 09:11:24Z jeff $");

/* I should really use ktr.. */
/*
#define UMA_DEBUG 1
#define UMA_DEBUG_ALLOC 1
#define UMA_DEBUG_ALLOC_1 1
*/

--- 47 unchanged lines hidden ---

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
static int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;

--- 83 unchanged lines hidden ---

#define	ZFREE_STATFREE	0x00000002	/* Update zone free statistic. */

/* Prototypes.. */

static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
static void page_free(void *, int, u_int8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip,
    int);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(int, int);
static void bucket_free(uma_bucket_t);
static void bucket_zone_drain(void);
static int zone_alloc_bucket(uma_zone_t zone, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, u_int32_t flags);
static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

--- 32 unchanged lines hidden ---

	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
		int size;

		ubz = &bucket_zones[j];
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
		    UMA_ZFLAG_INTERNAL | UMA_ZFLAG_BUCKET);
		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
			bucket_size[i >> BUCKET_SHIFT] = j;
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.

--- 18 unchanged lines hidden ---

	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);

	ubz = bucket_zone_lookup(entries);
	bucket = zone_alloc_item(ubz->ubz_zone, NULL, bflags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_bucket_t bucket)
{
	struct uma_bucket_zone *ubz;

	ubz = bucket_zone_lookup(bucket->ub_entries);
	zone_free_item(ubz->ubz_zone, bucket, NULL, SKIP_NONE,
	    ZFREE_STATFREE);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}
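
/*
 * Illustration (added commentary, not from the original file): bucket_init()
 * above quantizes bucket capacities in steps of (1 << BUCKET_SHIFT) entries
 * and records, for each step, the index of a bucket zone large enough to
 * satisfy it.  That lets bucket_zone_lookup() resolve a requested entry
 * count with a shift and a bucket_size[] table index, O(1) rather than a
 * walk of bucket_zones[].
 */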
static inline uma_keg_t
zone_first_keg(uma_zone_t zone)
{

	return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:

--- 8 unchanged lines hidden ---

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is eliminate collisions entirely.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		KEG_UNLOCK(keg);
		ret = hash_alloc(&newhash);
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			KEG_LOCK(keg);
		}
	}
	KEG_UNLOCK(keg);
}
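
/*
 * A note on the staged pattern above (added commentary): allocation and
 * freeing must not happen with the keg lock held, so the hash is grown in
 * stages.  A snapshot of the hash is taken under the lock, the new table
 * is allocated unlocked, and hash_expand() then revalidates under the
 * lock; if a racing thread already installed a bigger table, hash_expand()
 * fails and the freshly allocated table is the one handed to hash_free().
 */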
static void
zone_timeout(uma_zone_t zone)
{

	zone_foreach_keg(zone, &keg_timeout);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:

--- 10 unchanged lines hidden ---

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

--- 56 unchanged lines hidden ---

 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone,
		    hash->uh_slab_hash, NULL, SKIP_NONE, ZFREE_STATFREE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	void *item;

	if (bucket == NULL)
		return;

	while (bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
		KASSERT(item != NULL,
		    ("bucket_drain: botched ptr, item is NULL"));
#endif
		zone_free_item(zone, item, NULL, SKIP_DTOR, 0);
	}
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have

--- 63 unchanged lines hidden ---

	/* Now we do the free queue.. */
	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		bucket_free(bucket);
	}
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_slab_t slab;
	uma_slab_t n;
	u_int8_t flags;
	u_int8_t *mem;
	int i;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time.
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
#endif
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */

--- 9 unchanged lines hidden ---

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		if (keg->uk_fini)
			for (i = 0; i < keg->uk_ipers; i++)
				keg->uk_fini(
				    slab->us_data + (keg->uk_rsize * i),
				    keg->uk_size);
		flags = slab->us_flags;
		mem = slab->us_data;

		if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
			vm_object_t obj;

			if (flags & UMA_SLAB_KMEM)
				obj = kmem_object;
			else if (flags & UMA_SLAB_KERNEL)
				obj = kernel_object;
			else
				obj = NULL;
			for (i = 0; i < keg->uk_ppera; i++)
				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
				    obj);
		}
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL,
			    SKIP_NONE, ZFREE_STATFREE);
#ifdef UMA_DEBUG
		printf("%s: Returning %d bytes.\n",
		    keg->uk_name, UMA_SLAB_SIZE * keg->uk_ppera);
#endif
		keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
	}
}
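
/*
 * How draining is driven (added note): under memory pressure the pageout
 * daemon calls uma_reclaim(), which visits every zone with zone_drain();
 * zone_drain() empties the bucket caches and then funnels into keg_drain()
 * above to hand whole free slabs back to the VM.  A subsystem can trigger
 * the same reclamation directly, e.g.:
 *
 *	uma_reclaim();			drain every zone
 *	zone_drain(some_zone);		drain one zone only
 */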
static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		mtx_unlock(&uma_mtx);
		msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
		mtx_lock(&uma_mtx);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_mtx would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 *
 * Arguments:
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
{
	uma_slabrefcnt_t slabref;
	uma_alloc allocf;
	uma_slab_t slab;
	u_int8_t *mem;
	u_int8_t flags;
	int i;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;

#ifdef UMA_DEBUG
	printf("keg_alloc_slab: Allocating a new slab for %s\n", keg->uk_name);
#endif
	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
		if (slab == NULL) {
			KEG_LOCK(keg);
			return (NULL);
		}
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	/* zone is passed for legacy reasons. */
	mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL,
			    SKIP_NONE, ZFREE_STATFREE);
		KEG_LOCK(keg);
		return (NULL);
	}

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t)(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_firstfree = 0;
	slab->us_flags = flags;

--- 16 unchanged lines hidden ---

				break;
		if (i != keg->uk_ipers) {
			if (keg->uk_fini != NULL) {
				for (i--; i > -1; i--)
					keg->uk_fini(slab->us_data +
					    (keg->uk_rsize * i),
					    keg->uk_size);
			}
			if (keg->uk_flags & UMA_ZONE_VTOSLAB) {
				vm_object_t obj;

				if (flags & UMA_SLAB_KMEM)
					obj = kmem_object;
				else if (flags & UMA_SLAB_KERNEL)
					obj = kernel_object;
				else
					obj = NULL;
				for (i = 0; i < keg->uk_ppera; i++)
					vsetobj((vm_offset_t)mem +
					    (i * PAGE_SIZE), obj);
			}
			if (keg->uk_flags & UMA_ZONE_OFFPAGE)
				zone_free_item(keg->uk_slabzone, slab,
				    NULL, SKIP_NONE, ZFREE_STATFREE);
			keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
			    flags);
			KEG_LOCK(keg);
			return (NULL);
		}
	}
	KEG_LOCK(keg);

	if (keg->uk_flags & UMA_ZONE_HASH)
		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

	keg->uk_pages += keg->uk_ppera;
	keg->uk_free += keg->uk_ipers;

	return (slab);

--- 5 unchanged lines hidden ---

 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
	uma_keg_t keg;
	uma_slab_t tmps;

	keg = zone_first_keg(zone);

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_boot_pages_mtx);
	if ((tmps = LIST_FIRST(&uma_boot_pages)) != NULL) {
		LIST_REMOVE(tmps, us_link);
		mtx_unlock(&uma_boot_pages_mtx);

--- 13 unchanged lines hidden ---

#endif
	return keg->uk_allocf(zone, bytes, pflag, wait);
}

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *

--- 6 unchanged lines hidden ---

	return (p);
}

/*
 * Allocates a number of pages from within an object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_object_t object;
	vm_offset_t retkva, zkva;
	vm_page_t p;
	int pages, startpages;
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	object = keg->uk_obj;
	retkva = 0;

	/*
	 * This looks a little weird since we're getting one page at a time.
	 */
	VM_OBJECT_LOCK(object);
	p = TAILQ_LAST(&object->memq, pglist);
	pages = p != NULL ? p->pindex + 1 : 0;
	startpages = pages;
	zkva = keg->uk_kva + pages * PAGE_SIZE;
	for (; bytes > 0; bytes -= PAGE_SIZE) {
		p = vm_page_alloc(object, pages,
		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
		if (p == NULL) {
			if (pages != startpages)
				pmap_qremove(retkva, pages - startpages);
			while (pages != startpages) {
				pages--;

--- 51 unchanged lines hidden ---

static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

/*
 * Finish creating a small uma keg.  This calculates ipers, and the keg size.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_small_init(uma_keg_t keg)
{
	u_int rsize;
	u_int memused;
	u_int wastedspace;
	u_int shsize;

	KASSERT(keg != NULL, ("Keg is null in keg_small_init"));
	rsize = keg->uk_size;

	if (rsize < UMA_SMALLEST_UNIT)
		rsize = UMA_SMALLEST_UNIT;
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);

	keg->uk_rsize = rsize;
	keg->uk_ppera = 1;

	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		rsize += UMA_FRITMREF_SZ;	/* linkage & refcnt */
		shsize = sizeof(struct uma_slab_refcnt);
	} else {
		rsize += UMA_FRITM_SZ;	/* Account for linkage */
		shsize = sizeof(struct uma_slab);
	}

	keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
	KASSERT(keg->uk_ipers != 0, ("keg_small_init: ipers is 0"));
	memused = keg->uk_ipers * rsize + shsize;
	wastedspace = UMA_SLAB_SIZE - memused;

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM (kmem_map) for slabs which we
	 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
	 * result of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
		return;

	if ((wastedspace >= UMA_MAX_WASTE) &&
	    (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
		keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
		KASSERT(keg->uk_ipers <= 255,
		    ("keg_small_init: keg->uk_ipers too high!"));
#ifdef UMA_DEBUG
		printf("UMA decided we need offpage slab headers for "
		    "keg: %s, calculated wastedspace = %d, "
		    "maximum wasted space allowed = %d, "
		    "calculated ipers = %d, "
		    "new wasted space = %d\n", keg->uk_name, wastedspace,
		    UMA_MAX_WASTE, keg->uk_ipers,
		    UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
#endif
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
		if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
			keg->uk_flags |= UMA_ZONE_HASH;
	}
}

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_large_init(uma_keg_t keg)
{
	int pages;

	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));

	pages = keg->uk_size / UMA_SLAB_SIZE;

	/* Account for remainder */
	if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
		pages++;

	keg->uk_ppera = pages;
	keg->uk_ipers = 1;

	keg->uk_flags |= UMA_ZONE_OFFPAGE;
	if ((keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;

	keg->uk_rsize = keg->uk_size;
}
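
/*
 * Worked example (added commentary, assuming UMA_SLAB_SIZE == PAGE_SIZE ==
 * 4096): a 6000-byte item takes the large path: pages = 6000 / 4096 = 1,
 * the remainder check bumps it to 2, so uk_ppera = 2 and uk_ipers = 1 with
 * an off-page slab header.  A small item instead stays in keg_small_init(),
 * which packs floor((4096 - header) / (size + linkage)) items per page and
 * only goes off page when the leftover space exceeds UMA_MAX_WASTE.
 */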
static void
keg_cachespread_init(uma_keg_t keg)
{
	int alignsize;
	int trailer;
	int pages;
	int rsize;

	alignsize = keg->uk_align + 1;
	rsize = keg->uk_size;
	/*
	 * We want one item to start on every align boundary in a page.  To
	 * do this we will span pages.  We will also extend the item by the
	 * size of align if it is an even multiple of align.  Otherwise, it
	 * would fall on the same boundary every time.
	 */
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + alignsize;
	if ((rsize & alignsize) == 0)
		rsize += alignsize;
	trailer = rsize - keg->uk_size;
	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
	keg->uk_rsize = rsize;
	keg->uk_ppera = pages;
	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
	KASSERT(keg->uk_ipers <= uma_max_ipers,
	    ("keg_cachespread_init: keg->uk_ipers too high(%d) increase max_ipers",
	    keg->uk_ipers));
}
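
/*
 * Worked example for the cache-spread math above (added commentary,
 * assuming 4K pages and 64-byte cache lines, i.e. uk_align == 63): a
 * 256-byte item is already line-aligned, so rsize becomes 256 + 64 = 320.
 * pages = (320 * 64) / 4096 = 5 and ipers = (5 * 4096 + 64) / 320 = 64.
 * Each item starts 5 cache lines past the previous one, and since
 * gcd(5, 64) == 1 the 64 items land on all 64 distinct line offsets
 * within a page, spreading cache pressure evenly.
 */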
/*
 * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 * the keg onto the global keg list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static int

--- 15 unchanged lines hidden ---

	keg->uk_freef = page_free;
	keg->uk_recurse = 0;
	keg->uk_slabzone = NULL;

	/*
	 * The master zone is passed to us at keg-creation time.
	 */
	zone = arg->zone;
	keg->uk_name = zone->uz_name;

	if (arg->flags & UMA_ZONE_VM)
		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;

	if (arg->flags & UMA_ZONE_ZINIT)
		keg->uk_init = zero_init;

	if (arg->flags & UMA_ZONE_REFCNT || arg->flags & UMA_ZONE_MALLOC)
		keg->uk_flags |= UMA_ZONE_VTOSLAB;

	/*
	 * The +UMA_FRITM_SZ added to uk_size is to account for the
	 * linkage that is added to the size in keg_small_init().  If
	 * we don't account for this here then we may end up in
	 * keg_small_init() with a calculated 'ipers' of 0.
	 */
	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
			keg_cachespread_init(keg);
		else if ((keg->uk_size+UMA_FRITMREF_SZ) >
		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	} else {
		if (keg->uk_flags & UMA_ZONE_CACHESPREAD)
			keg_cachespread_init(keg);
		else if ((keg->uk_size+UMA_FRITM_SZ) >
		    (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
			keg_large_init(keg);
		else
			keg_small_init(keg);
	}

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			keg->uk_slabzone = slabrefzone;
		else
			keg->uk_slabzone = slabzone;
	}

--- 7 unchanged lines hidden ---

		keg->uk_allocf = uma_small_alloc;
		keg->uk_freef = uma_small_free;
#endif
		if (booted == 0)
			keg->uk_allocf = startup_alloc;
	}

	/*
	 * Initialize keg's lock (shared among zones).
	 */
	if (arg->flags & UMA_ZONE_MTXCLASS)
		KEG_LOCK_INIT(keg, 1);
	else
		KEG_LOCK_INIT(keg, 0);

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a right
	 * justified offset into the memory on an ALIGN_PTR boundary.
	 */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
		u_int totsize;

--- 32 unchanged lines hidden ---

			panic("UMA slab won't fit.\n");
		}
	}

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_alloc(&keg->uk_hash);

#ifdef UMA_DEBUG
	printf("UMA: %s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
	    zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
	    keg->uk_ipers, keg->uk_ppera,
	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
#endif

	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);

	mtx_lock(&uma_mtx);
	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
	mtx_unlock(&uma_mtx);
	return (0);
}

/*
 * Zone header ctor.  This initializes all fields, locks, etc.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 */
static int
zone_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	uma_zone_t z;
	uma_keg_t keg;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_slab = zone_fetch_slab;
	zone->uz_init = NULL;
	zone->uz_fini = NULL;
	zone->uz_allocs = 0;
	zone->uz_frees = 0;
	zone->uz_fails = 0;
	zone->uz_fills = zone->uz_count = 0;
	zone->uz_flags = 0;
	keg = arg->keg;

	if (arg->flags & UMA_ZONE_SECONDARY) {
		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
		zone->uz_init = arg->uminit;
		zone->uz_fini = arg->fini;
		zone->uz_lock = &keg->uk_lock;
		zone->uz_flags |= UMA_ZONE_SECONDARY;
		mtx_lock(&uma_mtx);
		ZONE_LOCK(zone);
		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
			if (LIST_NEXT(z, uz_link) == NULL) {
				LIST_INSERT_AFTER(z, zone, uz_link);
				break;
			}
		}
		ZONE_UNLOCK(zone);
		mtx_unlock(&uma_mtx);
	} else if (keg == NULL) {
		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
		    arg->align, arg->flags)) == NULL)
			return (ENOMEM);
	} else {
		struct uma_kctor_args karg;
		int error;

		/* We should only be here from uma_startup() */
		karg.size = arg->size;
		karg.uminit = arg->uminit;
		karg.fini = arg->fini;
		karg.align = arg->align;
		karg.flags = arg->flags;
		karg.zone = zone;
		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
		    flags);
		if (error)
			return (error);
	}
	/*
	 * Link in the first keg.
	 */
	zone->uz_klink.kl_keg = keg;
	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
	zone->uz_lock = &keg->uk_lock;
	zone->uz_size = keg->uk_size;
	zone->uz_flags |= (keg->uk_flags &
	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));

	/*
	 * Some internal zones don't have room allocated for the per cpu
	 * caches.  If we're internal, bail out here.
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
		return (0);
	}

	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
		zone->uz_count = BUCKET_MAX;
	else if (keg->uk_ipers <= BUCKET_MAX)
		zone->uz_count = keg->uk_ipers;

--- 10 unchanged lines hidden ---

 *	udata  unused
 */
static void
keg_dtor(void *arg, int size, void *udata)
{
	uma_keg_t keg;

	keg = (uma_keg_t)arg;
	KEG_LOCK(keg);
	if (keg->uk_free != 0) {
		printf("Freed UMA keg was not empty (%d items). "
		    " Lost %d pages of memory.\n",
		    keg->uk_free, keg->uk_pages);
	}
	KEG_UNLOCK(keg);

	hash_free(&keg->uk_hash);

	KEG_LOCK_FINI(keg);
}

/*
 * Zone header dtor.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_klink_t klink;
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;
	keg = zone_first_keg(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	mtx_lock(&uma_mtx);
	LIST_REMOVE(zone, uz_link);
	mtx_unlock(&uma_mtx);
	/*
	 * XXX there are some races here where
	 * the zone can be drained but zone lock
	 * released and then refilled before we
	 * remove it... we don't care for now
	 */
	zone_drain_wait(zone, M_WAITOK);
	/*
	 * Unlink all of our kegs.
	 */
	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
		klink->kl_keg = NULL;
		LIST_REMOVE(klink, kl_link);
		if (klink == &zone->uz_klink)
			continue;
		free(klink, M_TEMP);
	}
	/*
	 * We only destroy kegs from non-secondary zones.
	 */
	if ((zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
		mtx_lock(&uma_mtx);
		LIST_REMOVE(keg, uk_link);
		mtx_unlock(&uma_mtx);
		zone_free_item(kegs, keg, NULL, SKIP_NONE,
		    ZFREE_STATFREE);
	}
}

/*
 * Traverses every zone in the system and calls a callback
 *
 * Arguments:
 *	zfunc  A pointer to a function which accepts a zone
 *		as an argument.

--- 33 unchanged lines hidden ---

	/*
	 * Figure out the maximum number of items-per-slab we'll have if
	 * we're using the OFFPAGE slab header to track free items, given
	 * all possible object sizes and the maximum desired wastage
	 * (UMA_MAX_WASTE).
	 *
	 * We iterate until we find an object size for
	 * which the calculated wastage in keg_small_init() will be
	 * enough to warrant OFFPAGE.  Since wastedspace versus objsize
	 * is an overall increasing see-saw function, we find the smallest
	 * objsize such that the wastage is always acceptable for objects
	 * with that objsize or smaller.  Since a smaller objsize always
	 * generates a larger possible uma_max_ipers, we use this computed
	 * objsize to calculate the largest ipers possible.  Since the
	 * ipers calculated for OFFPAGE slab headers is always larger than
	 * the ipers initially calculated in keg_small_init(), we use
	 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
	 * obtain the maximum ipers possible for offpage slab headers.
	 *
	 * It should be noted that ipers versus objsize is an inversely
	 * proportional function which drops off rather quickly so as
	 * long as our UMA_MAX_WASTE is such that the objsize we calculate
	 * falls into the portion of the inverse relation AFTER the steep
	 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).

--- 15 unchanged lines hidden ---

	while (totsize >= wsize) {
		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
		    (objsize + UMA_FRITM_SZ);
		totsize *= (UMA_FRITM_SZ + objsize);
		objsize++;
	}
	if (objsize > UMA_SMALLEST_UNIT)
		objsize--;
	uma_max_ipers = MAX(UMA_SLAB_SIZE / objsize, 64);

	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
	totsize = wsize;
	objsize = UMA_SMALLEST_UNIT;
	while (totsize >= wsize) {
		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
		    (objsize + UMA_FRITMREF_SZ);
		totsize *= (UMA_FRITMREF_SZ + objsize);
		objsize++;
	}
	if (objsize > UMA_SMALLEST_UNIT)
		objsize--;
	uma_max_ipers_ref = MAX(UMA_SLAB_SIZE / objsize, 64);

	KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
	    ("uma_startup: calculated uma_max_ipers values too large!"));

#ifdef UMA_DEBUG
	printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
	printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
	    uma_max_ipers_ref);

--- 111 unchanged lines hidden ---

#endif
	callout_init(&uma_callout, CALLOUT_MPSAFE);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
#ifdef UMA_DEBUG
	printf("UMA startup3 complete.\n");
#endif
}
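
/*
 * Allocate a keg header and initialize it by pushing the arguments through
 * keg_ctor(), which is registered on the internal kegs zone and runs via
 * zone_alloc_item() below.  UMA_ALIGN_CACHE requests are resolved to the
 * boot-time uma_align_cache value here.  (Added commentary; the original
 * function carried no header comment.)
 */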
static uma_keg_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, u_int32_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
	args.flags = flags;
	args.zone = zone;
	return (zone_alloc_item(kegs, &args, M_WAITOK));
}

/* See uma.h */
void
uma_set_align(int align)
{

	if (align != UMA_ALIGN_CACHE)

--- 14 unchanged lines hidden ---

	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.keg = NULL;

	return (zone_alloc_item(zones, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_zone_t master)
{
	struct uma_zctor_args args;
	uma_keg_t keg;

	keg = zone_first_keg(master);
	args.name = name;
	args.size = keg->uk_size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.align = keg->uk_align;
	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
	args.keg = keg;

	/* XXX Attaches only one keg of potentially many. */
	return (zone_alloc_item(zones, &args, M_WAITOK));
}
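
/*
 * Usage sketch (hypothetical consumer, not part of this file): a secondary
 * zone shares the master keg's slabs but layers its own ctor/dtor and
 * init/fini on top, in the style of the mbuf packet zone.  "struct obj"
 * and the obj_* callbacks below are illustrative names only.
 */
#if 0
static uma_zone_t obj_zone;		/* master, owns the keg */
static uma_zone_t obj_pack_zone;	/* secondary, same keg */

static void
obj_zones_init(void)
{
	obj_zone = uma_zcreate("obj", sizeof(struct obj),
	    NULL, NULL, obj_init, obj_fini, UMA_ALIGN_PTR, 0);
	obj_pack_zone = uma_zsecond_create("obj_pack",
	    obj_pack_ctor, obj_pack_dtor, NULL, NULL, obj_zone);
}
#endif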
static void
zone_lock_pair(uma_zone_t a, uma_zone_t b)
{
	if (a < b) {
		ZONE_LOCK(a);
		mtx_lock_flags(b->uz_lock, MTX_DUPOK);
	} else {
		ZONE_LOCK(b);
		mtx_lock_flags(a->uz_lock, MTX_DUPOK);
	}
}

static void
zone_unlock_pair(uma_zone_t a, uma_zone_t b)
{

	ZONE_UNLOCK(a);
	ZONE_UNLOCK(b);
}

int
uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
{
	uma_klink_t klink;
	uma_klink_t kl;
	int error;

	error = 0;
	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);

	zone_lock_pair(zone, master);
	/*
	 * zone must use vtoslab() to resolve objects and must already be
	 * a secondary.
	 */
	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The new master must also use vtoslab().
	 */
	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
		error = EINVAL;
		goto out;
	}
	/*
	 * Both must either be refcnt, or not be refcnt.
	 */
	if ((zone->uz_flags & UMA_ZONE_REFCNT) !=
	    (master->uz_flags & UMA_ZONE_REFCNT)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The underlying object must be the same size.  rsize
	 * may be different.
	 */
	if (master->uz_size != zone->uz_size) {
		error = E2BIG;
		goto out;
	}
	/*
	 * Put it at the end of the list.
	 */
	klink->kl_keg = zone_first_keg(master);
	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
		if (LIST_NEXT(kl, kl_link) == NULL) {
			LIST_INSERT_AFTER(kl, klink, kl_link);
			break;
		}
	}
	klink = NULL;
	zone->uz_flags |= UMA_ZFLAG_MULTI;
	zone->uz_slab = zone_fetch_slab_multi;

out:
	zone_unlock_pair(zone, master);
	if (klink != NULL)
		free(klink, M_TEMP);

	return (error);
}
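
/*
 * Typical consumer usage of the allocation entry points below (a sketch
 * with a hypothetical "struct foo" zone, not part of this file).
 * uma_zalloc() and uma_zfree() are the uma.h wrappers around
 * uma_zalloc_arg() and uma_zfree_arg().
 */
#if 0
static uma_zone_t foo_zone;

static void
foo_example(void)
{
	struct foo *f;

	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	f = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
	/* ... use f ... */
	uma_zfree(foo_zone, f);
	uma_zdestroy(foo_zone);
}
#endif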
/* See uma.h */
void
uma_zdestroy(uma_zone_t zone)
{

	zone_free_item(zones, zone, NULL, SKIP_NONE, ZFREE_STATFREE);
}

/* See uma.h */
void *
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
{
	void *item;
	uma_cache_t cache;

--- 43 unchanged lines hidden ---

			cache->uc_allocs++;
			critical_exit();
#ifdef INVARIANTS
			ZONE_LOCK(zone);
			uma_dbg_alloc(zone, NULL, item);
			ZONE_UNLOCK(zone);
#endif
			if (zone->uz_ctor != NULL) {
				if (zone->uz_ctor(item, zone->uz_size,
				    udata, flags) != 0) {
					zone_free_item(zone, item, udata,
					    SKIP_DTOR, ZFREE_STATFAIL |
					    ZFREE_STATFREE);
					return (NULL);
				}
			}
			if (flags & M_ZERO)
				bzero(item, zone->uz_size);
			return (item);
		} else if (cache->uc_freebucket) {
			/*
			 * We have run out of items in our allocbucket.
			 * See if we can switch with our free bucket.
			 */
			if (cache->uc_freebucket->ub_cnt > 0) {
#ifdef UMA_DEBUG_ALLOC

--- 66 unchanged lines hidden ---

	/* Bump up our uz_count so we get here less */
	if (zone->uz_count < BUCKET_MAX)
		zone->uz_count++;

	/*
	 * Now let's just fill a bucket and put it on the free list.  If that
	 * works we'll restart the allocation from the beginning.
	 */
	if (zone_alloc_bucket(zone, flags)) {
		ZONE_UNLOCK(zone);
		goto zalloc_restart;
	}
	ZONE_UNLOCK(zone);
	/*
	 * We may not be able to get a bucket so return an actual item.
	 */
#ifdef UMA_DEBUG
	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
#endif

	item = zone_alloc_item(zone, udata, flags);
	return (item);
}

static uma_slab_t
keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
{
	uma_slab_t slab;

	mtx_assert(&keg->uk_lock, MA_OWNED);
	slab = NULL;

	for (;;) {
		/*
		 * Find a slab with some space.  Prefer slabs that are partially
		 * used over those that are totally full.  This helps to reduce
		 * fragmentation.
		 */
		if (keg->uk_free != 0) {
			if (!LIST_EMPTY(&keg->uk_part_slab)) {
				slab = LIST_FIRST(&keg->uk_part_slab);
			} else {
				slab = LIST_FIRST(&keg->uk_free_slab);
				LIST_REMOVE(slab, us_link);
				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
				    us_link);
			}
			MPASS(slab->us_keg == keg);
			return (slab);
		}

		/*
		 * M_NOVM means don't ask at all!
		 */
		if (flags & M_NOVM)
			break;

		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
			keg->uk_flags |= UMA_ZFLAG_FULL;
			/*
			 * If this is not a multi-zone, set the FULL bit.
			 * Otherwise slab_multi() takes care of it.
			 */
			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0)
				zone->uz_flags |= UMA_ZFLAG_FULL;
			if (flags & M_NOWAIT)
				break;
			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
			continue;
		}
		keg->uk_recurse++;
		slab = keg_alloc_slab(keg, zone, flags);
		keg->uk_recurse--;
		/*
		 * If we got a slab here it's safe to mark it partially used
		 * and return.  We assume that the caller is going to remove
		 * at least one item.
		 */
		if (slab) {
			MPASS(slab->us_keg == keg);
			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
			return (slab);
		}
		/*
		 * We might not have been able to get a slab but another cpu
		 * could have while we were unlocked.  Check again before we
		 * fail.
		 */
		flags |= M_NOVM;
	}
	return (slab);
}
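
/*
 * The relock helpers below convert between holding the zone lock and
 * holding a keg lock (added commentary).  For an ordinary zone the two are
 * the same mutex, since zone_ctor() points uz_lock at the keg's uk_lock,
 * so they collapse to no-ops; only UMA_ZFLAG_MULTI zones actually drop one
 * lock and take the other.
 */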
static inline void
zone_relock(uma_zone_t zone, uma_keg_t keg)
{
	if (zone->uz_lock != &keg->uk_lock) {
		KEG_UNLOCK(keg);
		ZONE_LOCK(zone);
	}
}

static inline void
keg_relock(uma_keg_t keg, uma_zone_t zone)
{
	if (zone->uz_lock != &keg->uk_lock) {
		ZONE_UNLOCK(zone);
		KEG_LOCK(keg);
	}
}

static uma_slab_t
zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
{
	uma_slab_t slab;

	if (keg == NULL)
		keg = zone_first_keg(zone);
	/*
	 * This is to prevent us from recursively trying to allocate
	 * buckets.  The problem is that if an allocation forces us to
	 * grab a new bucket we will call page_alloc, which will go off
	 * and cause the vm to allocate vm_map_entries.  If we need new
	 * buckets there too we will recurse in kmem_alloc and bad
	 * things happen.  So instead we return a NULL bucket, and make
	 * the code that allocates buckets smart enough to deal with it
	 */
	if (keg->uk_flags & UMA_ZFLAG_BUCKET && keg->uk_recurse != 0)
		return (NULL);

	for (;;) {
		slab = keg_fetch_slab(keg, zone, flags);
		if (slab)
			return (slab);
		if (flags & (M_NOWAIT | M_NOVM))
			break;
	}
	return (NULL);
}

/*
 * zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
 * with the keg locked.  Caller must call zone_relock() afterwards if the
 * zone lock is required.  On NULL the zone lock is held.
 *
 * The last pointer is used to seed the search.  It is not required.
 */
static uma_slab_t
zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
{
	uma_klink_t klink;
	uma_slab_t slab;
	uma_keg_t keg;
	int flags;
	int empty;
	int full;

	/*
	 * Don't wait on the first pass.  This will skip limit tests
	 * as well.  We don't want to block if we can find a provider
	 * without blocking.
	 */
	flags = (rflags & ~M_WAITOK) | M_NOWAIT;
	/*
	 * Use the last slab allocated as a hint for where to start
	 * the search.
	 */
	if (last) {
		slab = keg_fetch_slab(last, zone, flags);
		if (slab)
			return (slab);
		zone_relock(zone, last);
		last = NULL;
	}
	/*
	 * Loop until we have a slab in case of transient failures
	 * while M_WAITOK is specified.  I'm not sure this is 100%
	 * required but we've done it for so long now.
	 */
	for (;;) {
		empty = 0;
		full = 0;
		/*
		 * Search the available kegs for slabs.  Be careful to hold the
		 * correct lock while calling into the keg layer.
		 */
		LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
			keg = klink->kl_keg;
			keg_relock(keg, zone);
			if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
				slab = keg_fetch_slab(keg, zone, flags);
				if (slab)
					return (slab);
			}
			if (keg->uk_flags & UMA_ZFLAG_FULL)
				full++;
			else
				empty++;
			zone_relock(zone, keg);
		}
		if (rflags & (M_NOWAIT | M_NOVM))
			break;
		flags = rflags;
		/*
		 * All kegs are full.  XXX We can't atomically check all kegs
		 * and sleep so just sleep for a short period and retry.
		 */
		if (full && !empty) {
			zone->uz_flags |= UMA_ZFLAG_FULL;
			msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
			zone->uz_flags &= ~UMA_ZFLAG_FULL;
			continue;
		}
	}
	return (NULL);
}
static void *
slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
{
	uma_keg_t keg;
	uma_slabrefcnt_t slabref;
	void *item;
	u_int8_t freei;

	keg = slab->us_keg;
	mtx_assert(&keg->uk_lock, MA_OWNED);

	freei = slab->us_firstfree;
	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		slab->us_firstfree = slabref->us_freelist[freei].us_item;
	} else {
		slab->us_firstfree = slab->us_freelist[freei].us_item;
	}

--- 9 unchanged lines hidden ---

		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
	}

	return (item);
}

static int
zone_alloc_bucket(uma_zone_t zone, int flags)
{
	uma_bucket_t bucket;
	uma_slab_t slab;
	uma_keg_t keg;
	int16_t saved;
	int max, origflags = flags;

	/*
	 * Try this zone's free list first so we don't allocate extra buckets.
	 */
	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		KASSERT(bucket->ub_cnt == 0,
		    ("zone_alloc_bucket: Bucket on free list is not empty."));
		LIST_REMOVE(bucket, ub_link);
	} else {
		int bflags;

		bflags = (flags & ~M_ZERO);
		if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
			bflags |= M_NOVM;

		ZONE_UNLOCK(zone);
		bucket = bucket_alloc(zone->uz_count, bflags);
		ZONE_LOCK(zone);
	}

	if (bucket == NULL) {
		return (0);
	}

#ifdef SMP
	/*
	 * This code is here to limit the number of simultaneous bucket fills
	 * for any given zone to the number of per cpu caches in this zone.
	 * This is done so that we don't allocate more memory than we really
	 * need.
	 */
	if (zone->uz_fills >= mp_ncpus)
		goto done;

#endif
	zone->uz_fills++;

	max = MIN(bucket->ub_entries, zone->uz_count);
	/* Try to keep the buckets totally full */
	saved = bucket->ub_cnt;
	slab = NULL;
	keg = NULL;
	while (bucket->ub_cnt < max &&
	    (slab = zone->uz_slab(zone, keg, flags)) != NULL) {
		keg = slab->us_keg;
		while (slab->us_freecount && bucket->ub_cnt < max) {
			bucket->ub_bucket[bucket->ub_cnt++] =
			    slab_alloc_item(zone, slab);
		}

		/* Don't block on the next fill */
		flags |= M_NOWAIT;
	}
	if (slab)
		zone_relock(zone, keg);

	/*
	 * We unlock here because we need to call the zone's init.
	 * It should be safe to unlock because the slab dealt with
	 * above is already on the appropriate list within the keg
	 * and the bucket we filled is not yet on any list, so we
	 * own it.
	 */
	if (zone->uz_init != NULL) {
		int i;

		ZONE_UNLOCK(zone);
		for (i = saved; i < bucket->ub_cnt; i++)
			if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
			    origflags) != 0)
				break;
		/*
		 * If we couldn't initialize the whole bucket, put the
		 * rest back onto the freelist.
		 */
		if (i != bucket->ub_cnt) {
			int j;

			for (j = i; j < bucket->ub_cnt; j++) {
				zone_free_item(zone, bucket->ub_bucket[j],
				    NULL, SKIP_FINI, 0);
#ifdef INVARIANTS
				bucket->ub_bucket[j] = NULL;
#endif
			}
			bucket->ub_cnt = i;
		}
		ZONE_LOCK(zone);

--- 21 unchanged lines hidden ---

 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
 *
 * Returns
 *	NULL if there is no memory and M_NOWAIT is set
 *	An item if successful
 */

static void *
zone_alloc_item(uma_zone_t zone, void *udata, int flags)
{
	uma_slab_t slab;
	void *item;

	item = NULL;

#ifdef UMA_DEBUG_ALLOC
	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
#endif
	ZONE_LOCK(zone);

	slab = zone->uz_slab(zone, NULL, flags);
	if (slab == NULL) {
		zone->uz_fails++;
		ZONE_UNLOCK(zone);
		return (NULL);
	}

	item = slab_alloc_item(zone, slab);

	zone_relock(zone, slab->us_keg);
	zone->uz_allocs++;
	ZONE_UNLOCK(zone);

	/*
	 * We have to call both the zone's init (not the keg's init)
	 * and the zone's ctor.  This is because the item is going from
	 * a keg slab directly to the user, and the user is expecting it
	 * to be both zone-init'd as well as zone-ctor'd.
	 */
	if (zone->uz_init != NULL) {
		if (zone->uz_init(item, zone->uz_size, flags) != 0) {
			zone_free_item(zone, item, udata, SKIP_FINI,
			    ZFREE_STATFAIL | ZFREE_STATFREE);
			return (NULL);
		}
	}
	if (zone->uz_ctor != NULL) {
		if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
			zone_free_item(zone, item, udata, SKIP_DTOR,
			    ZFREE_STATFAIL | ZFREE_STATFREE);
			return (NULL);
		}
	}
	if (flags & M_ZERO)
		bzero(item, zone->uz_size);

	return (item);
}

/* See uma.h */
void
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
	uma_cache_t cache;
	uma_bucket_t bucket;
	int bflags;
	int cpu;

#ifdef UMA_DEBUG_ALLOC_1
	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
#endif
	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
	    zone->uz_name);

	if (zone->uz_dtor)
		zone->uz_dtor(item, zone->uz_size, udata);

#ifdef INVARIANTS
	ZONE_LOCK(zone);
	if (zone->uz_flags & UMA_ZONE_MALLOC)
		uma_dbg_free(zone, udata, item);
	else
		uma_dbg_free(zone, NULL, item);
	ZONE_UNLOCK(zone);
#endif
	/*
	 * The race here is acceptable.  If we miss it we'll just have to wait
	 * a little longer for the limits to be reset.
	 */
	if (zone->uz_flags & UMA_ZFLAG_FULL)
		goto zfree_internal;

	/*
	 * If possible, free to the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent

--- 105 unchanged lines hidden ---

	/* And the zone.. */
	ZONE_UNLOCK(zone);

#ifdef UMA_DEBUG_ALLOC
	printf("uma_zfree: Allocating new free bucket.\n");
#endif
	bflags = M_NOWAIT;

	if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
		bflags |= M_NOVM;
	bucket = bucket_alloc(zone->uz_count, bflags);
	if (bucket) {
		ZONE_LOCK(zone);
		LIST_INSERT_HEAD(&zone->uz_free_bucket,
		    bucket, ub_link);
		ZONE_UNLOCK(zone);
		goto zfree_restart;
	}

	/*
	 * If nothing else caught this, we'll just do an internal free.
	 */
zfree_internal:
	zone_free_item(zone, item, udata, SKIP_DTOR, ZFREE_STATFREE);

	return;
}

/*
 * Frees an item to an INTERNAL zone or allocates a free bucket
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 */
static void
zone_free_item(uma_zone_t zone, void *item, void *udata,
    enum zfreeskip skip, int flags)
{
	uma_slab_t slab;
	uma_slabrefcnt_t slabref;
	uma_keg_t keg;
	u_int8_t *mem;
	u_int8_t freei;
	int clearfull;

	if (skip < SKIP_DTOR && zone->uz_dtor)
		zone->uz_dtor(item, zone->uz_size, udata);

	if (skip < SKIP_FINI && zone->uz_fini)
		zone->uz_fini(item, zone->uz_size);

	ZONE_LOCK(zone);

	if (flags & ZFREE_STATFAIL)
		zone->uz_fails++;
	if (flags & ZFREE_STATFREE)
		zone->uz_frees++;

	if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
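		/*
		 * Without VTOSLAB the slab header is found arithmetically
		 * (added note): mask the item address down to its slab
		 * boundary, then either look the slab up in the keg hash
		 * (OFFPAGE kegs) or step to the fixed in-page header
		 * offset uk_pgoff.
		 */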
		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
		keg = zone_first_keg(zone); /* Must only be one. */
		if (zone->uz_flags & UMA_ZONE_HASH) {
			slab = hash_sfind(&keg->uk_hash, mem);
		} else {
			mem += keg->uk_pgoff;
			slab = (uma_slab_t)mem;
		}
	} else {
		/* This prevents redundant lookups via free(). */
		if ((zone->uz_flags & UMA_ZONE_MALLOC) && udata != NULL)
			slab = (uma_slab_t)udata;
		else
			slab = vtoslab((vm_offset_t)item);
		keg = slab->us_keg;
		keg_relock(keg, zone);
	}
	MPASS(keg == slab->us_keg);

	/* Do we need to remove from any lists? */
	if (slab->us_freecount+1 == keg->uk_ipers) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
	} else if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);

--- 15 unchanged lines hidden ---

		slab->us_freelist[freei].us_item = slab->us_firstfree;
	}
	slab->us_firstfree = freei;
	slab->us_freecount++;

	/* Zone statistics */
	keg->uk_free++;

	clearfull = 0;
	if (keg->uk_flags & UMA_ZFLAG_FULL) {
		if (keg->uk_pages < keg->uk_maxpages) {
			keg->uk_flags &= ~UMA_ZFLAG_FULL;
			clearfull = 1;
		}

		/*
		 * We can handle one more allocation.  Since we're clearing
		 * ZFLAG_FULL, wake up all procs blocked on pages.  This
		 * should be uncommon, so keeping this simple for now
		 * (rather than adding count of blocked threads etc).
		 */
		wakeup(keg);
	}
	if (clearfull) {
		zone_relock(zone, keg);
		zone->uz_flags &= ~UMA_ZFLAG_FULL;
		wakeup(zone);
		ZONE_UNLOCK(zone);
	} else
		KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_set_max(uma_zone_t zone, int nitems)
{
	uma_keg_t keg;

	ZONE_LOCK(zone);
	keg = zone_first_keg(zone);
	keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
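	/*
	 * uk_maxpages is kept in whole allocation units (uk_ppera pages);
	 * the check below rounds up so a nitems value that is not an exact
	 * multiple of uk_ipers still admits the requested number of items.
	 * (Added note.)
	 */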
	if (keg->uk_maxpages * keg->uk_ipers < nitems)
		keg->uk_maxpages += keg->uk_ppera;

	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
	uma_keg_t keg;

	ZONE_LOCK(zone);
	keg = zone_first_keg(zone);
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_init on non-empty keg"));
	keg->uk_init = uminit;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
	uma_keg_t keg;

	ZONE_LOCK(zone);
	keg = zone_first_keg(zone);
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_fini on non-empty keg"));
	keg->uk_fini = fini;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
{
	ZONE_LOCK(zone);
	KASSERT(zone_first_keg(zone)->uk_pages == 0,
	    ("uma_zone_set_zinit on non-empty keg"));
	zone->uz_init = zinit;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
{
	ZONE_LOCK(zone);
	KASSERT(zone_first_keg(zone)->uk_pages == 0,
	    ("uma_zone_set_zfini on non-empty keg"));
	zone->uz_fini = zfini;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_freef is not actually used with the zone locked */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{

	ZONE_LOCK(zone);
	zone_first_keg(zone)->uk_freef = freef;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_allocf is not actually used with the zone locked */
void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
	uma_keg_t keg;

	ZONE_LOCK(zone);
	keg = zone_first_keg(zone);
	keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
	keg->uk_allocf = allocf;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
int
uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
{
	uma_keg_t keg;
	vm_offset_t kva;
	int pages;

	keg = zone_first_keg(zone);
	pages = count / keg->uk_ipers;

	if (pages * keg->uk_ipers < count)
		pages++;

	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);

	if (kva == 0)

--- 19 unchanged lines hidden ---

/* See uma.h */
void
uma_prealloc(uma_zone_t zone, int items)
{
	int slabs;
	uma_slab_t slab;
	uma_keg_t keg;

	keg = zone_first_keg(zone);
	ZONE_LOCK(zone);
	slabs = items / keg->uk_ipers;
	if (slabs * keg->uk_ipers < items)
		slabs++;
	while (slabs > 0) {
		slab = keg_alloc_slab(keg, zone, M_WAITOK);
		if (slab == NULL)
			break;
		MPASS(slab->us_keg == keg);
		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
		slabs--;
	}
	ZONE_UNLOCK(zone);
}

/* See uma.h */
u_int32_t *
uma_find_refcnt(uma_zone_t zone, void *item)
{
	uma_slabrefcnt_t slabref;
	uma_keg_t keg;
	u_int32_t *refcnt;
	int idx;

	slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
	    (~UMA_SLAB_MASK));
	KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
	keg = slabref->us_keg;
	idx = ((unsigned long)item - (unsigned long)slabref->us_data)
	    / keg->uk_rsize;
	refcnt = &slabref->us_freelist[idx].us_refcnt;
	return (refcnt);
}

--- 18 unchanged lines hidden ---

/* See uma.h */
int
uma_zone_exhausted(uma_zone_t zone)
{
	int full;

	ZONE_LOCK(zone);
	full = (zone->uz_flags & UMA_ZFLAG_FULL);
	ZONE_UNLOCK(zone);
	return (full);
}

int
uma_zone_exhausted_nolock(uma_zone_t zone)
{
	return (zone->uz_flags & UMA_ZFLAG_FULL);
}

void *
uma_large_malloc(int size, int wait)
{
	void *mem;
	uma_slab_t slab;
	u_int8_t flags;

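	/*
	 * Large allocations get a dedicated off-page slab header from
	 * slabzone; vsetslab() below records it against the mapped address
	 * so free() and uma_large_free() can recover it from the pointer
	 * alone.  (Added note.)
	 */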
	slab = zone_alloc_item(slabzone, NULL, wait);
	if (slab == NULL)
		return (NULL);
	mem = page_alloc(NULL, size, &flags, wait);
	if (mem) {
		vsetslab((vm_offset_t)mem, slab);
		slab->us_data = mem;
		slab->us_flags = flags | UMA_SLAB_MALLOC;
		slab->us_size = size;
	} else {
		zone_free_item(slabzone, slab, NULL, SKIP_NONE,
		    ZFREE_STATFAIL | ZFREE_STATFREE);
	}

	return (mem);
}

void
uma_large_free(uma_slab_t slab)
{
	vsetobj((vm_offset_t)slab->us_data, kmem_object);
	page_free(slab->us_data, slab->us_size, slab->us_flags);
	zone_free_item(slabzone, slab, NULL, SKIP_NONE, ZFREE_STATFREE);
}

void
uma_print_stats(void)
{
	zone_foreach(uma_print_zone);
}

--- 10 unchanged lines hidden ---

{
	printf("alloc: %p(%d), free: %p(%d)\n",
	    cache->uc_allocbucket,
	    cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
	    cache->uc_freebucket,
	    cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
}

static void
uma_print_keg(uma_keg_t keg)
{
	uma_slab_t slab;

	printf("keg: %s(%p) size %d(%d) flags %d ipers %d ppera %d "
	    "out %d free %d limit %d\n",
	    keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
	    keg->uk_ipers, keg->uk_ppera,
	    (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free,
	    (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
	printf("Part slabs:\n");
	LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
		slab_print(slab);
	printf("Free slabs:\n");
	LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
		slab_print(slab);
	printf("Full slabs:\n");
	LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
		slab_print(slab);
}

void
uma_print_zone(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_klink_t kl;
	int i;

	printf("zone: %s(%p) size %d flags %d\n",
	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
		uma_print_keg(kl->kl_keg);
	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		cache = &zone->uz_cpu[i];
		printf("CPU %d Cache:\n", i);
		cache_print(cache);
	}
}

--- 63 unchanged lines hidden ---

static int
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
{
	struct uma_stream_header ush;
	struct uma_type_header uth;
	struct uma_percpu_stat ups;
	uma_bucket_t bucket;
	struct sbuf sbuf;
	uma_cache_t cache;
	uma_klink_t kl;
	uma_keg_t kz;
	uma_zone_t z;
	uma_keg_t k;
	char *buffer;
	int buflen, count, error, i;

	mtx_lock(&uma_mtx);
restart:
	mtx_assert(&uma_mtx, MA_OWNED);
	count = 0;
	LIST_FOREACH(kz, &uma_kegs, uk_link) {

--- 34 unchanged lines hidden ---

	}

	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			bzero(&uth, sizeof(uth));
			ZONE_LOCK(z);
			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
			uth.uth_align = kz->uk_align;
			uth.uth_size = kz->uk_size;
			uth.uth_rsize = kz->uk_rsize;
			LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
				k = kl->kl_keg;
				uth.uth_maxpages += k->uk_maxpages;
				uth.uth_pages += k->uk_pages;
				uth.uth_keg_free += k->uk_free;
				uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
				    * k->uk_ipers;
			}

			/*
			 * A zone is secondary if it is not the first entry
			 * on the keg's zone list.
			 */
			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z))
				uth.uth_zone_flags = UTH_ZONE_SECONDARY;

			LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
				uth.uth_zone_free += bucket->ub_cnt;
			uth.uth_allocs = z->uz_allocs;
			uth.uth_frees = z->uz_frees;
			uth.uth_fails = z->uz_fails;

--- 60 unchanged lines hidden ---

		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
				allocs = z->uz_allocs;
				frees = z->uz_frees;
				cachefree = 0;
			} else
				uma_zone_sumstat(z, &cachefree, &allocs,
				    &frees);
			if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z)))
				cachefree += kz->uk_free;
			LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link)
				cachefree += bucket->ub_cnt;
			db_printf("%18s %8ju %8jd %8d %12ju\n", z->uz_name,
			    (uintmax_t)kz->uk_size,
			    (intmax_t)(allocs - frees), cachefree,
			    (uintmax_t)allocs);
		}
	}
}
#endif