/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{
	if (cnt.v_free_count < cnt.v_free_min)
		bucketdisable = 1;
	else
		bucketdisable = 0;
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.  Initialize bucket_size[] so that
 * each range of bucket sizes maps to the appropriate zone.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int i;
	int j;

	for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) {
		int size;

		ubz = &bucket_zones[j];
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
		for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT))
			bucket_size[i >> BUCKET_SHIFT] = j;
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	int idx;

	idx = howmany(entries, 1 << BUCKET_SHIFT);
	return (&bucket_zones[bucket_size[idx]]);
}
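
/*
 * Illustrative example (not part of the allocator): suppose, hypothetically,
 * that BUCKET_SHIFT is 4 and bucket_zones describes zones of 16, 32, 64 and
 * 128 entries.  bucket_init() then fills bucket_size[] as
 *
 *	bucket_size[0..1] = 0	(requests of  1-16 entries -> 16-entry zone)
 *	bucket_size[2]    = 1	(requests of 17-32 entries -> 32-entry zone)
 *	...
 *
 * and bucket_zone_lookup(20) computes howmany(20, 16) == 2, selecting the
 * 32-entry bucket zone.
 */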

static uma_bucket_t
bucket_alloc(int entries, int bflags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of UMA_BOOT_PAGES.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);

	ubz = bucket_zone_lookup(entries);
	bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_bucket_t bucket)
{
	struct uma_bucket_zone *ubz;

	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

/*
 * Routine called by timeout which is used to fire off some time interval
 * based calculations.  (stats, hash size, etc.)
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Arguments:
 *	zone  The zone to operate on
 *
 * Returns:
 *	Nothing
 */
static void
zone_timeout(uma_zone_t zone)
{
	uma_keg_t keg;
	u_int64_t alloc;

	keg = zone->uz_keg;
	alloc = 0;

	/*
	 * Expand the zone hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is eliminate collisions entirely.
	 * This may be a little aggressive.  Should I allow for two
	 * collisions max?
	 */
	ZONE_LOCK(zone);
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the zone lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		newhash = keg->uk_hash;
		ZONE_UNLOCK(zone);
		ret = hash_alloc(&newhash);
		ZONE_LOCK(zone);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			ZONE_UNLOCK(zone);
			hash_free(&oldhash);
			ZONE_LOCK(zone);
		}
	}
	ZONE_UNLOCK(zone);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  A new hash structure with the old hash size in uh_hashsize
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash)
{
	int oldsize;
	int alloc;

	oldsize = hash->uh_hashsize;

	/* We're just going to go to a power of two greater */
	if (oldsize) {
		hash->uh_hashsize = oldsize * 2;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL,
		    M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}
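
/*
 * A short worked sketch of why hash_expand() below must rehash every slab:
 * the bucket index depends on uh_hashmask, so doubling the table changes
 * the mask.  For example (hypothetical hash value), a slab whose pointer
 * hashes to 69 files under bucket 5 in a 64-bucket table (mask 63), but
 * under bucket 69 in the doubled 128-bucket table (mask 127).  No resize
 * can preserve bucket positions in general.
 */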

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	int hval;
	int i;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (i = 0; i < oldhash->uh_hashsize; i++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash table to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure we're freeing
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		uma_zfree_internal(hashzone,
		    hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone    The zone to free to, must be unlocked.
 *	bucket  The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */
static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	uma_slab_t slab;
	int mzone;
	void *item;

	if (bucket == NULL)
		return;

	slab = NULL;
	mzone = 0;

	/* We have to look up the slab again for malloc.. */
	if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
		mzone = 1;

	while (bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
		KASSERT(item != NULL,
		    ("bucket_drain: botched ptr, item is NULL"));
#endif
		/*
		 * This is extremely inefficient.  The slab pointer was passed
		 * to uma_zfree_arg, but we lost it because the buckets don't
		 * hold them.  This will go away when free() gets a size
		 * passed to it.
		 */
		if (mzone)
			slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
		uma_zfree_internal(zone, item, slab, SKIP_DTOR);
	}
}
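
/*
 * Illustrative note on the masking above (assuming, hypothetically, that
 * UMA_SLAB_SIZE is one 4 KB page, so UMA_SLAB_MASK == 0xfff): an item at
 * 0xc2345678 lives in the slab whose memory starts at
 *
 *	0xc2345678 & ~0xfff == 0xc2345000
 *
 * vtoslab() then recovers the slab header that slab_zalloc() recorded for
 * that page with vsetslab().
 */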

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone  The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(cache->uc_allocbucket);
		if (cache->uc_freebucket != NULL)
			bucket_free(cache->uc_freebucket);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_bucket_t bucket;

	/*
	 * Drain the bucket queues and free the buckets, we just keep two per
	 * cpu (alloc/free).
	 */
	while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(bucket);
		ZONE_LOCK(zone);
	}

	/* Now we do the free queue.. */
	while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		bucket_free(bucket);
	}
}

/*
 * Frees pages from a zone back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Arguments:
 *	zone  The zone to free pages from
 *
 * Returns:
 *	Nothing.
 */
static void
zone_drain(uma_zone_t zone)
{
	struct slabhead freeslabs = { 0 };
	uma_keg_t keg;
	uma_slab_t slab;
	uma_slab_t n;
	u_int8_t flags;
	u_int8_t *mem;
	int i;

	keg = zone->uz_keg;

	/*
	 * We don't want to take pages from statically allocated zones at
	 * this time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	ZONE_LOCK(zone);

#ifdef UMA_DEBUG
	printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
#endif
	bucket_cache_drain(zone);
	if (keg->uk_free == 0)
		goto finished;

	slab = LIST_FIRST(&keg->uk_free_slab);
	while (slab) {
		n = LIST_NEXT(slab, us_link);

		/* We have nowhere to free these to */
		if (slab->us_flags & UMA_SLAB_BOOT) {
			slab = n;
			continue;
		}

		LIST_REMOVE(slab, us_link);
		keg->uk_pages -= keg->uk_ppera;
		keg->uk_free -= keg->uk_ipers;

		if (keg->uk_flags & UMA_ZONE_HASH)
			UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);

		SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);

		slab = n;
	}
finished:
	ZONE_UNLOCK(zone);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		if (keg->uk_fini)
			for (i = 0; i < keg->uk_ipers; i++)
				keg->uk_fini(
				    slab->us_data + (keg->uk_rsize * i),
				    keg->uk_size);
		flags = slab->us_flags;
		mem = slab->us_data;

		if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
		    (keg->uk_flags & UMA_ZONE_REFCNT)) {
			vm_object_t obj;

			if (flags & UMA_SLAB_KMEM)
				obj = kmem_object;
			else
				obj = NULL;
			for (i = 0; i < keg->uk_ppera; i++)
				vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
				    obj);
		}
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			uma_zfree_internal(keg->uk_slabzone, slab, NULL,
			    SKIP_NONE);
#ifdef UMA_DEBUG
		printf("%s: Returning %d bytes.\n",
		    zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
#endif
		keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
	}
}
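
/*
 * A note on the shape of zone_drain() above: free slabs are first unlinked
 * onto the local freeslabs list while the zone lock is held, and only after
 * the lock is dropped are the finalizers and uk_freef run.  The obvious
 * alternative, calling uk_freef on each slab inside the locked walk, would
 * call into the VM with the zone lock held and invite the same deadlock
 * that zone_timeout() goes out of its way to avoid.
 */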

/*
 * Allocate a new slab for a zone.  This does not insert the slab onto a
 * list.
 *
 * Arguments:
 *	zone  The zone to allocate slabs for
 *	wait  Shall we wait?
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
slab_zalloc(uma_zone_t zone, int wait)
{
	uma_slabrefcnt_t slabref;
	uma_slab_t slab;
	uma_keg_t keg;
	u_int8_t *mem;
	u_int8_t flags;
	int i;

	slab = NULL;
	keg = zone->uz_keg;

#ifdef UMA_DEBUG
	printf("slab_zalloc:  Allocating a new slab for %s\n", zone->uz_name);
#endif
	ZONE_UNLOCK(zone);

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait);
		if (slab == NULL) {
			ZONE_LOCK(zone);
			return (NULL);
		}
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		wait |= M_ZERO;
	else
		wait &= ~M_ZERO;

	mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
	    &flags, wait);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			uma_zfree_internal(keg->uk_slabzone, slab, NULL,
			    SKIP_NONE);
		ZONE_LOCK(zone);
		return (NULL);
	}

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t)(mem + keg->uk_pgoff);

	if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
	    (keg->uk_flags & UMA_ZONE_REFCNT))
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_firstfree = 0;
	slab->us_flags = flags;

	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		for (i = 0; i < keg->uk_ipers; i++) {
			slabref->us_freelist[i].us_refcnt = 0;
			slabref->us_freelist[i].us_item = i+1;
		}
	} else {
		for (i = 0; i < keg->uk_ipers; i++)
			slab->us_freelist[i].us_item = i+1;
	}

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, wait) != 0)
				break;
		if (i != keg->uk_ipers) {
			if (keg->uk_fini != NULL) {
				for (i--; i > -1; i--)
					keg->uk_fini(slab->us_data +
					    (keg->uk_rsize * i),
					    keg->uk_size);
			}
			if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
			    (keg->uk_flags & UMA_ZONE_REFCNT)) {
				vm_object_t obj;

				if (flags & UMA_SLAB_KMEM)
					obj = kmem_object;
				else
					obj = NULL;
				for (i = 0; i < keg->uk_ppera; i++)
					vsetobj((vm_offset_t)mem +
					    (i * PAGE_SIZE), obj);
			}
			if (keg->uk_flags & UMA_ZONE_OFFPAGE)
				uma_zfree_internal(keg->uk_slabzone, slab,
				    NULL, SKIP_NONE);
			keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
			    flags);
			ZONE_LOCK(zone);
			return (NULL);
		}
	}
	ZONE_LOCK(zone);

	if (keg->uk_flags & UMA_ZONE_HASH)
		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

	keg->uk_pages += keg->uk_ppera;
	keg->uk_free += keg->uk_ipers;

	return (slab);
}
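
/*
 * The us_freelist initialization above builds an implicit singly linked
 * free list threaded through the item indices.  For a freshly allocated
 * slab with, say, ipers == 4, the state is
 *
 *	us_firstfree = 0
 *	us_freelist[0].us_item = 1
 *	us_freelist[1].us_item = 2
 *	us_freelist[2].us_item = 3
 *	us_freelist[3].us_item = 4
 *
 * so uma_slab_alloc() hands out items 0, 1, 2, 3 in order, each time
 * advancing us_firstfree to the stored successor index.
 */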

/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
	uma_keg_t keg;

	keg = zone->uz_keg;

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	mtx_lock(&uma_mtx);
	if (uma_boot_free != 0) {
		uma_slab_t tmps;

		tmps = LIST_FIRST(&uma_boot_pages);
		LIST_REMOVE(tmps, us_link);
		uma_boot_free--;
		mtx_unlock(&uma_mtx);
		*pflag = tmps->us_flags;
		return (tmps->us_data);
	}
	mtx_unlock(&uma_mtx);
	if (booted == 0)
		panic("UMA: Increase UMA_BOOT_PAGES");
	/*
	 * Now that we've booted, reset these users to their real allocator.
	 */
#ifdef UMA_MD_SMALL_ALLOC
	keg->uk_allocf = uma_small_alloc;
#else
	keg->uk_allocf = page_alloc;
#endif
	return (keg->uk_allocf(zone, bytes, pflag, wait));
}
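
/*
 * Rough boot-time sequence for page allocation (a summary of the code
 * above and of uma_startup()/uma_startup2() below):
 *
 *	1. uma_startup() links UMA_BOOT_PAGES of preallocated memory onto
 *	   uma_boot_pages; kegs created this early get
 *	   uk_allocf = startup_alloc.
 *	2. Until uma_startup2() sets booted, startup_alloc() satisfies slab
 *	   allocations from that list and panics if it runs dry.
 *	3. On first use after boot, startup_alloc() rewrites uk_allocf to
 *	   page_alloc() (or uma_small_alloc where available) and forwards
 *	   the request.
 */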

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	zone   Unused
 *	bytes  The number of bytes requested
 *	pflag  Output parameter for the slab flags of this allocation
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
	void *p;	/* Returned page */

	*pflag = UMA_SLAB_KMEM;
	p = (void *) kmem_malloc(kmem_map, bytes, wait);

	return (p);
}

/*
 * Allocates a number of pages from within an object
 *
 * Arguments:
 *	zone   Unused
 *	bytes  The number of bytes requested
 *	flags  Output parameter for the slab flags of this allocation
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_object_t object;
	vm_offset_t retkva, zkva;
	vm_page_t p;
	int pages, startpages;

	object = zone->uz_keg->uk_obj;
	retkva = 0;

	/*
	 * This looks a little weird since we're getting one page at a time.
	 */
	VM_OBJECT_LOCK(object);
	p = TAILQ_LAST(&object->memq, pglist);
	pages = p != NULL ? p->pindex + 1 : 0;
	startpages = pages;
	zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
	for (; bytes > 0; bytes -= PAGE_SIZE) {
		p = vm_page_alloc(object, pages,
		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
		if (p == NULL) {
			if (pages != startpages)
				pmap_qremove(retkva, pages - startpages);
			while (pages != startpages) {
				pages--;
				p = TAILQ_LAST(&object->memq, pglist);
				vm_page_lock_queues();
				vm_page_unwire(p, 0);
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			retkva = 0;
			goto done;
		}
		pmap_qenter(zkva, &p, 1);
		if (retkva == 0)
			retkva = zkva;
		zkva += PAGE_SIZE;
		pages += 1;
	}
done:
	VM_OBJECT_UNLOCK(object);
	*flags = UMA_SLAB_PRIV;

	return ((void *)retkva);
}

/*
 * Frees a number of pages to the system
 *
 * Arguments:
 *	mem    A pointer to the memory to be freed
 *	size   The size of the memory being freed
 *	flags  The original p->us_flags field
 *
 * Returns:
 *	Nothing
 */
static void
page_free(void *mem, int size, u_int8_t flags)
{
	vm_map_t map;

	if (flags & UMA_SLAB_KMEM)
		map = kmem_map;
	else
		panic("UMA: page_free used with invalid flags %d\n", flags);

	kmem_free(map, (vm_offset_t)mem, size);
}

/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

/*
 * Finish creating a small uma zone.  This calculates ipers, and the zone
 * size.
 *
 * Arguments
 *	zone  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
zone_small_init(uma_zone_t zone)
{
	uma_keg_t keg;
	u_int rsize;
	u_int memused;
	u_int wastedspace;
	u_int shsize;

	keg = zone->uz_keg;
	KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
	rsize = keg->uk_size;

	if (rsize < UMA_SMALLEST_UNIT)
		rsize = UMA_SMALLEST_UNIT;
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);

	keg->uk_rsize = rsize;
	keg->uk_ppera = 1;

	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		rsize += UMA_FRITMREF_SZ;	/* linkage & refcnt */
		shsize = sizeof(struct uma_slab_refcnt);
	} else {
		rsize += UMA_FRITM_SZ;	/* Account for linkage */
		shsize = sizeof(struct uma_slab);
	}

	keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
	KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0"));
	memused = keg->uk_ipers * rsize + shsize;
	wastedspace = UMA_SLAB_SIZE - memused;

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked to not go to the VM for buckets.  If we do this we
	 * may end up going to the VM (kmem_map) for slabs which we
	 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
	 * result of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
		return;

	if ((wastedspace >= UMA_MAX_WASTE) &&
	    (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
		keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
		KASSERT(keg->uk_ipers <= 255,
		    ("zone_small_init: keg->uk_ipers too high!"));
#ifdef UMA_DEBUG
		printf("UMA decided we need offpage slab headers for "
		    "zone: %s, calculated wastedspace = %d, "
		    "maximum wasted space allowed = %d, "
		    "calculated ipers = %d, "
		    "new wasted space = %d\n", zone->uz_name, wastedspace,
		    UMA_MAX_WASTE, keg->uk_ipers,
		    UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
#endif
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
		if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
			keg->uk_flags |= UMA_ZONE_HASH;
	}
}
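
/*
 * A worked example of the sizing above, with hypothetical constants
 * (UMA_SLAB_SIZE = 4096, sizeof(struct uma_slab) = 64, UMA_FRITM_SZ = 2,
 * UMA_MAX_WASTE = 256): for a zone of 200-byte items with pointer
 * alignment, rsize stays 200, the per-item cost is 202 with linkage, and
 *
 *	ipers       = (4096 - 64) / 202 = 19
 *	memused     = 19 * 202 + 64    = 3902
 *	wastedspace = 4096 - 3902      = 194
 *
 * 194 < 256, so the slab header stays inside the page.  Were the waste to
 * reach UMA_MAX_WASTE, the keg would switch to OFFPAGE headers and pack
 * UMA_SLAB_SIZE / rsize = 20 items instead.
 */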

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma zone.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will
 * be more complicated.
 *
 * Arguments
 *	zone  The zone we should initialize
 *
 * Returns
 *	Nothing
 */
static void
zone_large_init(uma_zone_t zone)
{
	uma_keg_t keg;
	int pages;

	keg = zone->uz_keg;

	KASSERT(keg != NULL, ("Keg is null in zone_large_init"));
	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));

	pages = keg->uk_size / UMA_SLAB_SIZE;

	/* Account for remainder */
	if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
		pages++;

	keg->uk_ppera = pages;
	keg->uk_ipers = 1;

	keg->uk_flags |= UMA_ZONE_OFFPAGE;
	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;

	keg->uk_rsize = keg->uk_size;
}
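
/*
 * Example (hypothetical numbers): with UMA_SLAB_SIZE = 4096, a zone of
 * 9000-byte items gets pages = 9000 / 4096 = 2, bumped to 3 for the
 * remainder, so each slab spans three contiguous pages and holds exactly
 * one item (ipers = 1), with its header allocated OFFPAGE.
 */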

/*
 * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
 * the keg onto the global keg list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static int
keg_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_kctor_args *arg = udata;
	uma_keg_t keg = mem;
	uma_zone_t zone;

	bzero(keg, size);
	keg->uk_size = arg->size;
	keg->uk_init = arg->uminit;
	keg->uk_fini = arg->fini;
	keg->uk_align = arg->align;
	keg->uk_free = 0;
	keg->uk_pages = 0;
	keg->uk_flags = arg->flags;
	keg->uk_allocf = page_alloc;
	keg->uk_freef = page_free;
	keg->uk_recurse = 0;
	keg->uk_slabzone = NULL;

	/*
	 * The master zone is passed to us at keg-creation time.
	 */
	zone = arg->zone;
	zone->uz_keg = keg;

	if (arg->flags & UMA_ZONE_VM)
		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;

	if (arg->flags & UMA_ZONE_ZINIT)
		keg->uk_init = zero_init;

	/*
	 * The +UMA_FRITM_SZ added to uk_size is to account for the
	 * linkage that is added to the size in zone_small_init().  If
	 * we don't account for this here then we may end up in
	 * zone_small_init() with a calculated 'ipers' of 0.
	 */
	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		if ((keg->uk_size+UMA_FRITMREF_SZ) >
		    (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
			zone_large_init(zone);
		else
			zone_small_init(zone);
	} else {
		if ((keg->uk_size+UMA_FRITM_SZ) >
		    (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
			zone_large_init(zone);
		else
			zone_small_init(zone);
	}

	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			keg->uk_slabzone = slabrefzone;
		else
			keg->uk_slabzone = slabzone;
	}

	/*
	 * If we haven't booted yet we need allocations to go through the
	 * startup cache until the vm is ready.
	 */
	if (keg->uk_ppera == 1) {
#ifdef UMA_MD_SMALL_ALLOC
		keg->uk_allocf = uma_small_alloc;
		keg->uk_freef = uma_small_free;
#endif
		if (booted == 0)
			keg->uk_allocf = startup_alloc;
	}

	/*
	 * Initialize keg's lock (shared among zones) through the
	 * master zone.
	 */
	zone->uz_lock = &keg->uk_lock;
	if (arg->flags & UMA_ZONE_MTXCLASS)
		ZONE_LOCK_INIT(zone, 1);
	else
		ZONE_LOCK_INIT(zone, 0);

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a right
	 * justified offset into the memory on an ALIGN_PTR boundary.
	 */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
		u_int totsize;

		/* Size of the slab struct and free list */
		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize = sizeof(struct uma_slab_refcnt) +
			    keg->uk_ipers * UMA_FRITMREF_SZ;
		else
			totsize = sizeof(struct uma_slab) +
			    keg->uk_ipers * UMA_FRITM_SZ;

		if (totsize & UMA_ALIGN_PTR)
			totsize = (totsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);
		keg->uk_pgoff = UMA_SLAB_SIZE - totsize;

		if (keg->uk_flags & UMA_ZONE_REFCNT)
			totsize = keg->uk_pgoff +
			    sizeof(struct uma_slab_refcnt) +
			    keg->uk_ipers * UMA_FRITMREF_SZ;
		else
			totsize = keg->uk_pgoff + sizeof(struct uma_slab) +
			    keg->uk_ipers * UMA_FRITM_SZ;

		/*
		 * The only way the following is possible is if with our
		 * UMA_ALIGN_PTR adjustments we are now bigger than
		 * UMA_SLAB_SIZE.  I haven't checked whether this is
		 * mathematically possible for all cases, so we make
		 * sure here anyway.
		 */
		if (totsize > UMA_SLAB_SIZE) {
			printf("zone %s ipers %d rsize %d size %d\n",
			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
			    keg->uk_size);
			panic("UMA slab won't fit.");
		}
	}

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_alloc(&keg->uk_hash);

#ifdef UMA_DEBUG
	printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
	    zone->uz_name, zone,
	    keg->uk_size, keg->uk_ipers,
	    keg->uk_ppera, keg->uk_pgoff);
#endif

	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);

	mtx_lock(&uma_mtx);
	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
	mtx_unlock(&uma_mtx);
	return (0);
}
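
/*
 * A worked pgoff example (hypothetical sizes): with UMA_SLAB_SIZE = 4096,
 * sizeof(struct uma_slab) = 64, UMA_FRITM_SZ = 2, ipers = 19 and
 * UMA_ALIGN_PTR = 7, the header-plus-freelist needs 64 + 19 * 2 = 102
 * bytes, rounded up to 104 for pointer alignment, so
 *
 *	uk_pgoff = 4096 - 104 = 3992
 *
 * and every slab header sits right justified in the last 104 bytes of its
 * page, with items filling the space below it.
 */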

/*
 * Zone header ctor.  This initializes all fields, locks, etc.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 */
static int
zone_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	uma_zone_t z;
	uma_keg_t keg;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_init = NULL;
	zone->uz_fini = NULL;
	zone->uz_allocs = 0;
	zone->uz_frees = 0;
	zone->uz_fills = zone->uz_count = 0;

	if (arg->flags & UMA_ZONE_SECONDARY) {
		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
		keg = arg->keg;
		zone->uz_keg = keg;
		zone->uz_init = arg->uminit;
		zone->uz_fini = arg->fini;
		zone->uz_lock = &keg->uk_lock;
		mtx_lock(&uma_mtx);
		ZONE_LOCK(zone);
		keg->uk_flags |= UMA_ZONE_SECONDARY;
		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
			if (LIST_NEXT(z, uz_link) == NULL) {
				LIST_INSERT_AFTER(z, zone, uz_link);
				break;
			}
		}
		ZONE_UNLOCK(zone);
		mtx_unlock(&uma_mtx);
	} else if (arg->keg == NULL) {
		if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
		    arg->align, arg->flags) == NULL)
			return (ENOMEM);
	} else {
		struct uma_kctor_args karg;
		int error;

		/* We should only be here from uma_startup() */
		karg.size = arg->size;
		karg.uminit = arg->uminit;
		karg.fini = arg->fini;
		karg.align = arg->align;
		karg.flags = arg->flags;
		karg.zone = zone;
		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
		    flags);
		if (error)
			return (error);
	}
	keg = zone->uz_keg;
	zone->uz_lock = &keg->uk_lock;

	/*
	 * Some internal zones don't have room allocated for the per cpu
	 * caches.  If we're internal, bail out here.
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
		KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
		return (0);
	}

	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
		zone->uz_count = BUCKET_MAX;
	else if (keg->uk_ipers <= BUCKET_MAX)
		zone->uz_count = keg->uk_ipers;
	else
		zone->uz_count = BUCKET_MAX;
	return (0);
}
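
/*
 * Note on the uz_count seeding above: uz_count is the entry count
 * requested whenever a new per-CPU bucket is allocated.  Starting it at
 * min(ipers, BUCKET_MAX) keeps the first bucket fill to roughly one
 * slab's worth of items; uma_zalloc_arg() then grows uz_count toward
 * BUCKET_MAX each time an allocation has to fall back to the zone, so
 * busy zones end up with full-sized buckets.
 */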

/*
 * Keg header dtor.  This frees all data, destroys locks, frees the hash
 * table and removes the keg from the global list.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
keg_dtor(void *arg, int size, void *udata)
{
	uma_keg_t keg;

	keg = (uma_keg_t)arg;
	mtx_lock(&keg->uk_lock);
	if (keg->uk_free != 0) {
		printf("Freed UMA keg was not empty (%d items). "
		    " Lost %d pages of memory.\n",
		    keg->uk_free, keg->uk_pages);
	}
	mtx_unlock(&keg->uk_lock);

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_free(&keg->uk_hash);

	mtx_destroy(&keg->uk_lock);
}

/*
 * Zone header dtor.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;
	keg = zone->uz_keg;

	if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	mtx_lock(&uma_mtx);
	zone_drain(zone);
	if (keg->uk_flags & UMA_ZONE_SECONDARY) {
		LIST_REMOVE(zone, uz_link);
		/*
		 * XXX there are some races here where the zone can be
		 * drained but the zone lock released and then refilled
		 * before we remove it...  we don't care for now.
		 */
		ZONE_LOCK(zone);
		if (LIST_EMPTY(&keg->uk_zones))
			keg->uk_flags &= ~UMA_ZONE_SECONDARY;
		ZONE_UNLOCK(zone);
		mtx_unlock(&uma_mtx);
	} else {
		LIST_REMOVE(keg, uk_link);
		LIST_REMOVE(zone, uz_link);
		mtx_unlock(&uma_mtx);
		uma_zfree_internal(kegs, keg, NULL, SKIP_NONE);
	}
	zone->uz_keg = NULL;
}

/*
 * Traverses every zone in the system and calls a callback
 *
 * Arguments:
 *	zfunc  A pointer to a function which accepts a zone
 *	       as an argument.
 *
 * Returns:
 *	Nothing
 */
static void
zone_foreach(void (*zfunc)(uma_zone_t))
{
	uma_keg_t keg;
	uma_zone_t zone;

	mtx_lock(&uma_mtx);
	LIST_FOREACH(keg, &uma_kegs, uk_link) {
		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
			zfunc(zone);
	}
	mtx_unlock(&uma_mtx);
}
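
/*
 * Teardown ordering, as driven by uma_zdestroy() further below: the
 * per-CPU caches are flushed first (cache_drain), then the bucket queues
 * and free slabs (zone_drain), and only when the last zone detaches from
 * a keg is the keg itself freed and its lock destroyed via keg_dtor().
 * Draining before unlinking is what lets keg_dtor() treat a nonzero
 * uk_free as a leak worth warning about.
 */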

/* Public functions */
/* See uma.h */
void
uma_startup(void *bootmem)
{
	struct uma_zctor_args args;
	uma_slab_t slab;
	u_int slabsize;
	u_int objsize, totsize, wsize;
	int i;

#ifdef UMA_DEBUG
	printf("Creating uma keg headers zone and keg.\n");
#endif
	/*
	 * The general UMA lock is a recursion-allowed lock because
	 * there is a code path where, while we're still configured
	 * to use startup_alloc() for backend page allocations, we
	 * may end up in uma_reclaim() which calls zone_foreach(zone_drain),
	 * which grabs uma_mtx, only to later call into startup_alloc()
	 * because while freeing we needed to allocate a bucket.  Since
	 * startup_alloc() also takes uma_mtx, we need to be able to
	 * recurse on it.
	 */
	mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE);

	/*
	 * Figure out the maximum number of items-per-slab we'll have if
	 * we're using the OFFPAGE slab header to track free items, given
	 * all possible object sizes and the maximum desired wastage
	 * (UMA_MAX_WASTE).
	 *
	 * We iterate until we find an object size for which the calculated
	 * wastage in zone_small_init() will be enough to warrant OFFPAGE.
	 * Since wastedspace versus objsize is an overall increasing see-saw
	 * function, we find the smallest objsize such that the wastage is
	 * always acceptable for objects with that objsize or smaller.
	 * Since a smaller objsize always generates a larger possible
	 * uma_max_ipers, we use this computed objsize to calculate the
	 * largest ipers possible.  Since the ipers calculated for OFFPAGE
	 * slab headers is always larger than the ipers initially calculated
	 * in zone_small_init(), we use the former's equation
	 * (UMA_SLAB_SIZE / keg->uk_rsize) to obtain the maximum ipers
	 * possible for offpage slab headers.
	 *
	 * It should be noted that ipers versus objsize is an inversely
	 * proportional function which drops off rather quickly so as
	 * long as our UMA_MAX_WASTE is such that the objsize we calculate
	 * falls into the portion of the inverse relation AFTER the steep
	 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
	 *
	 * Note that we have 8-bits (1 byte) to use as a freelist index
	 * inside the actual slab header itself and this is enough to
	 * accommodate us.  In the worst case, a UMA_SMALLEST_UNIT sized
	 * object with offpage slab header would have ipers =
	 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
	 * 1 greater than what our byte-integer freelist index can
	 * accommodate, but we know that this situation never occurs as
	 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
	 * that we need to go to offpage slab headers.  Or, if we do,
	 * then we trap that condition below and panic in the INVARIANTS
	 * case.
	 */
	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
	totsize = wsize;
	objsize = UMA_SMALLEST_UNIT;
	while (totsize >= wsize) {
		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
		    (objsize + UMA_FRITM_SZ);
		totsize *= (UMA_FRITM_SZ + objsize);
		objsize++;
	}
	if (objsize > UMA_SMALLEST_UNIT)
		objsize--;
	uma_max_ipers = UMA_SLAB_SIZE / objsize;

	wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) -
	    UMA_MAX_WASTE;
	totsize = wsize;
	objsize = UMA_SMALLEST_UNIT;
	while (totsize >= wsize) {
		totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
		    (objsize + UMA_FRITMREF_SZ);
		totsize *= (UMA_FRITMREF_SZ + objsize);
		objsize++;
	}
	if (objsize > UMA_SMALLEST_UNIT)
		objsize--;
	uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;

	KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
	    ("uma_startup: calculated uma_max_ipers values too large!"));
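
	/*
	 * Worked example of the loops above, with hypothetical numbers
	 * (UMA_SLAB_SIZE = 4096, sizeof(struct uma_slab) = 64,
	 * UMA_FRITM_SZ = 2, UMA_MAX_WASTE = 256, UMA_SMALLEST_UNIT = 8):
	 * wsize = 4096 - 64 - 256 = 3776.  At objsize = 8 the loop packs
	 * 4032 / 10 = 403 items using 4030 bytes (>= 3776, keep going);
	 * the first objsize whose best in-page packing leaves more than
	 * UMA_MAX_WASTE unused ends the loop, the decrement steps back to
	 * the last acceptable size, and uma_max_ipers becomes
	 * UMA_SLAB_SIZE divided by that size.
	 */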

#ifdef UMA_DEBUG
	printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n",
	    uma_max_ipers);
	printf("Calculated uma_max_ipers_ref (for OFFPAGE) is %d\n",
	    uma_max_ipers_ref);
#endif

	/* "manually" create the initial zone */
	args.name = "UMA Kegs";
	args.size = sizeof(struct uma_keg);
	args.ctor = keg_ctor;
	args.dtor = keg_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = &masterkeg;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-CPU queues so it's smaller */
	zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Filling boot free list.\n");
#endif
	for (i = 0; i < UMA_BOOT_PAGES; i++) {
		slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
		slab->us_data = (u_int8_t *)slab;
		slab->us_flags = UMA_SLAB_BOOT;
		LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
		uma_boot_free++;
	}

#ifdef UMA_DEBUG
	printf("Creating uma zone headers zone and keg.\n");
#endif
	args.name = "UMA Zones";
	args.size = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1));
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = NULL;
	args.align = 32 - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	/* The initial zone has no per-CPU queues so it's smaller */
	zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
	printf("Initializing pcpu cache locks.\n");
	printf("Creating slab and hash zones.\n");
#endif

	/*
	 * This is the max number of free list items we'll have with
	 * offpage slabs.
	 */
	slabsize = uma_max_ipers * UMA_FRITM_SZ;
	slabsize += sizeof(struct uma_slab);

	/* Now make a zone for slab headers */
	slabzone = uma_zcreate("UMA Slabs",
	    slabsize,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	/*
	 * We also create a zone for the bigger slabs with reference
	 * counts in them, to accommodate UMA_ZONE_REFCNT zones.
	 */
	slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
	slabsize += sizeof(struct uma_slab_refcnt);
	slabrefzone = uma_zcreate("UMA RCntSlabs",
	    slabsize,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR,
	    UMA_ZFLAG_INTERNAL);

	hashzone = uma_zcreate("UMA Hash",
	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	bucket_init();

#ifdef UMA_MD_SMALL_ALLOC
	booted = 1;
#endif

#ifdef UMA_DEBUG
	printf("UMA startup complete.\n");
#endif
}

/* see uma.h */
void
uma_startup2(void)
{
	booted = 1;
	bucket_enable();
#ifdef UMA_DEBUG
	printf("UMA startup2 complete.\n");
#endif
}

/*
 * Initialize our callout handle
 */
static void
uma_startup3(void)
{
#ifdef UMA_DEBUG
	printf("Starting callout.\n");
#endif
	callout_init(&uma_callout, CALLOUT_MPSAFE);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
#ifdef UMA_DEBUG
	printf("UMA startup3 complete.\n");
#endif
}

static uma_zone_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, u_int16_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.zone = zone;
	return (uma_zalloc_internal(kegs, &args, M_WAITOK));
}

/* See uma.h */
uma_zone_t
uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
    uma_init uminit, uma_fini fini, int align, u_int16_t flags)
{
	struct uma_zctor_args args;

	/* This stuff is essential for the zone ctor */
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
	args.align = align;
	args.flags = flags;
	args.keg = NULL;

	return (uma_zalloc_internal(zones, &args, M_WAITOK));
}
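
/*
 * Usage sketch (hypothetical caller, not part of this file): a subsystem
 * with a fixed-size object would typically do
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	...
 *	f = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, f);
 *	uma_zdestroy(foo_zone);
 *
 * "foo" and struct foo are placeholders; ctor/dtor/uminit/fini may all be
 * NULL when no per-allocation setup is needed.
 */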

/* See uma.h */
uma_zone_t
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_zone_t master)
{
	struct uma_zctor_args args;

	args.name = name;
	args.size = master->uz_keg->uk_size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.align = master->uz_keg->uk_align;
	args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY;
	args.keg = master->uz_keg;

	return (uma_zalloc_internal(zones, &args, M_WAITOK));
}

/* See uma.h */
void
uma_zdestroy(uma_zone_t zone)
{
	uma_zfree_internal(zones, zone, NULL, SKIP_NONE);
}
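
/*
 * Sketch of what a secondary zone buys you (hypothetical names): given an
 * existing master zone, e.g.
 *
 *	raw_zone = uma_zcreate("raw", size, ...);
 *	cooked_zone = uma_zsecond_create("cooked", cooked_ctor,
 *	    cooked_dtor, cooked_init, cooked_fini, raw_zone);
 *
 * both zones draw items from the same keg (the same slabs and page
 * budget) but apply different zone-level init/ctor layers, so the
 * "cooked" view can cache expensively constructed items while "raw"
 * hands out plain storage.
 */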

/* See uma.h */
void *
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
{
	void *item;
	uma_cache_t cache;
	uma_bucket_t bucket;
	int cpu;
	int badness;

	/* This is the fast path allocation */
#ifdef UMA_DEBUG_ALLOC_1
	printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
#endif
	CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
	    zone->uz_name, flags);

	if (!(flags & M_NOWAIT)) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (nosleepwithlocks) {
#ifdef WITNESS
			badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
			    NULL,
			    "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
			    zone->uz_name);
#else
			badness = 1;
#endif
		} else {
			badness = 0;
#ifdef WITNESS
			WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
			    "malloc(M_WAITOK) of \"%s\"", zone->uz_name);
#endif
		}
		if (badness) {
			flags &= ~M_WAITOK;
			flags |= M_NOWAIT;
		}
	}

	/*
	 * If possible, allocate from the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent
	 * preemption and migration.  We release the critical section in
	 * order to acquire the zone mutex if we are unable to allocate from
	 * the current cache; when we re-acquire the critical section, we
	 * must detect and handle migration if it has occurred.
	 */
zalloc_restart:
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

zalloc_start:
	bucket = cache->uc_allocbucket;

	if (bucket) {
		if (bucket->ub_cnt > 0) {
			bucket->ub_cnt--;
			item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
			bucket->ub_bucket[bucket->ub_cnt] = NULL;
#endif
			KASSERT(item != NULL,
			    ("uma_zalloc: Bucket pointer mangled."));
			cache->uc_allocs++;
			critical_exit();
#ifdef INVARIANTS
			ZONE_LOCK(zone);
			uma_dbg_alloc(zone, NULL, item);
			ZONE_UNLOCK(zone);
#endif
			if (zone->uz_ctor != NULL) {
				if (zone->uz_ctor(item, zone->uz_keg->uk_size,
				    udata, flags) != 0) {
					uma_zfree_internal(zone, item, udata,
					    SKIP_DTOR);
					return (NULL);
				}
			}
			if (flags & M_ZERO)
				bzero(item, zone->uz_keg->uk_size);
			return (item);
		} else if (cache->uc_freebucket) {
			/*
			 * We have run out of items in our allocbucket.
			 * See if we can switch with our free bucket.
			 */
			if (cache->uc_freebucket->ub_cnt > 0) {
#ifdef UMA_DEBUG_ALLOC
				printf("uma_zalloc: Swapping empty with"
				    " alloc.\n");
#endif
				bucket = cache->uc_freebucket;
				cache->uc_freebucket = cache->uc_allocbucket;
				cache->uc_allocbucket = bucket;

				goto zalloc_start;
			}
		}
	}
	/*
	 * The attempt to retrieve the item from the per-CPU cache has
	 * failed, so we must go back to the zone.  This requires the zone
	 * lock, so we must drop the critical section, then re-acquire it
	 * when we go back to the cache.  Since the critical section is
	 * released, we may be preempted or migrate.  As such, make sure not
	 * to maintain any thread-local state specific to the cache from
	 * prior to releasing the critical section.
	 */
	critical_exit();
	ZONE_LOCK(zone);
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];
	bucket = cache->uc_allocbucket;
	if (bucket != NULL) {
		if (bucket->ub_cnt > 0) {
			ZONE_UNLOCK(zone);
			goto zalloc_start;
		}
		bucket = cache->uc_freebucket;
		if (bucket != NULL && bucket->ub_cnt > 0) {
			ZONE_UNLOCK(zone);
			goto zalloc_start;
		}
	}

	/* Since we have locked the zone we may as well send back our stats */
	zone->uz_allocs += cache->uc_allocs;
	cache->uc_allocs = 0;
	zone->uz_frees += cache->uc_frees;
	cache->uc_frees = 0;

	/* Our old one is now a free bucket */
	if (cache->uc_allocbucket) {
		KASSERT(cache->uc_allocbucket->ub_cnt == 0,
		    ("uma_zalloc_arg: Freeing a non free bucket."));
		LIST_INSERT_HEAD(&zone->uz_free_bucket,
		    cache->uc_allocbucket, ub_link);
		cache->uc_allocbucket = NULL;
	}

	/* Check the free list for a new alloc bucket */
	if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) {
		KASSERT(bucket->ub_cnt != 0,
		    ("uma_zalloc_arg: Returning an empty bucket."));

		LIST_REMOVE(bucket, ub_link);
		cache->uc_allocbucket = bucket;
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}
	/* We are no longer associated with this CPU. */
	critical_exit();

	/* Bump up our uz_count so we get here less */
	if (zone->uz_count < BUCKET_MAX)
		zone->uz_count++;

	/*
	 * Now let's just fill a bucket and put it on the free list.  If
	 * that works we'll restart the allocation from the beginning.
	 */
	if (uma_zalloc_bucket(zone, flags)) {
		ZONE_UNLOCK(zone);
		goto zalloc_restart;
	}
	ZONE_UNLOCK(zone);
	/*
	 * We may not be able to get a bucket so return an actual item.
	 */
#ifdef UMA_DEBUG
	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
#endif

	return (uma_zalloc_internal(zone, udata, flags));
}
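
/*
 * The fast path above, in outline (a summary, not additional logic):
 *
 *	1. current CPU's allocbucket has an item -> pop, ctor, return
 *	2. freebucket has items                  -> swap buckets, retry
 *	3. zone's uz_full_bucket list non-empty  -> install as allocbucket
 *	4. uma_zalloc_bucket() can fill one      -> restart from the top
 *	5. otherwise                             -> uma_zalloc_internal()
 *
 * Steps 1-2 run inside a critical section with no zone lock; step 3
 * onward requires the zone lock.
 */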

static uma_slab_t
uma_zone_slab(uma_zone_t zone, int flags)
{
	uma_slab_t slab;
	uma_keg_t keg;

	keg = zone->uz_keg;

	/*
	 * This is to prevent us from recursively trying to allocate
	 * buckets.  The problem is that if an allocation forces us to
	 * grab a new bucket we will call page_alloc, which will go off
	 * and cause the vm to allocate vm_map_entries.  If we need new
	 * buckets there too we will recurse in kmem_alloc and bad
	 * things happen.  So instead we return a NULL bucket, and make
	 * the code that allocates buckets smart enough to deal with it.
	 *
	 * XXX: While we want this protection for the bucket zones so that
	 * recursion from the VM is handled (and the calling code that
	 * allocates buckets knows how to deal with it), we do not want
	 * to prevent allocation from the slab header zones (slabzone
	 * and slabrefzone) if uk_recurse is not zero for them.  The
	 * reason is that it could lead to NULL being returned for
	 * slab header allocations even in the M_WAITOK case, and the
	 * caller can't handle that.
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
		if ((zone != slabzone) && (zone != slabrefzone))
			return (NULL);

	slab = NULL;

	for (;;) {
		/*
		 * Find a slab with some space.  Prefer slabs that are
		 * partially used over those that are totally full.  This
		 * helps to reduce fragmentation.
		 */
		if (keg->uk_free != 0) {
			if (!LIST_EMPTY(&keg->uk_part_slab)) {
				slab = LIST_FIRST(&keg->uk_part_slab);
			} else {
				slab = LIST_FIRST(&keg->uk_free_slab);
				LIST_REMOVE(slab, us_link);
				LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
				    us_link);
			}
			return (slab);
		}

		/*
		 * M_NOVM means don't ask at all!
		 */
		if (flags & M_NOVM)
			break;

		if (keg->uk_maxpages &&
		    keg->uk_pages >= keg->uk_maxpages) {
			keg->uk_flags |= UMA_ZFLAG_FULL;

			if (flags & M_NOWAIT)
				break;
			else
				msleep(keg, &keg->uk_lock, PVM,
				    "zonelimit", 0);
			continue;
		}
		keg->uk_recurse++;
		slab = slab_zalloc(zone, flags);
		keg->uk_recurse--;

		/*
		 * If we got a slab here it's safe to mark it partially used
		 * and return.  We assume that the caller is going to remove
		 * at least one item.
		 */
		if (slab) {
			LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
			return (slab);
		}
		/*
		 * We might not have been able to get a slab but another cpu
		 * could have while we were unlocked.  Check again before we
		 * fail.
		 */
		if (flags & M_NOWAIT)
			flags |= M_NOVM;
	}
	return (slab);
}

static void *
uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
{
	uma_keg_t keg;
	uma_slabrefcnt_t slabref;
	void *item;
	u_int8_t freei;

	keg = zone->uz_keg;

	freei = slab->us_firstfree;
	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		slab->us_firstfree = slabref->us_freelist[freei].us_item;
	} else {
		slab->us_firstfree = slab->us_freelist[freei].us_item;
	}
	item = slab->us_data + (keg->uk_rsize * freei);

	slab->us_freecount--;
	keg->uk_free--;
#ifdef INVARIANTS
	uma_dbg_alloc(zone, slab, item);
#endif
	/* Move this slab to the full list */
	if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
	}

	return (item);
}
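
/*
 * Example of the freelist pop in uma_slab_alloc() (continuing the
 * four-item slab sketched after slab_zalloc()): if items 0 and 1 were
 * allocated and item 1 was then freed, us_firstfree == 1 and
 * us_freelist[1].us_item == 2.  The next allocation takes freei = 1,
 * returns us_data + rsize * 1, and leaves us_firstfree == 2; i.e. the
 * slab behaves as a LIFO stack of free item indices.
 */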

static int
uma_zalloc_bucket(uma_zone_t zone, int flags)
{
	uma_bucket_t bucket;
	uma_slab_t slab;
	int16_t saved;
	int max, origflags = flags;

	/*
	 * Try this zone's free list first so we don't allocate extra
	 * buckets.
	 */
	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		KASSERT(bucket->ub_cnt == 0,
		    ("uma_zalloc_bucket: Bucket on free list is not empty."));
		LIST_REMOVE(bucket, ub_link);
	} else {
		int bflags;

		bflags = (flags & ~M_ZERO);
		if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
			bflags |= M_NOVM;

		ZONE_UNLOCK(zone);
		bucket = bucket_alloc(zone->uz_count, bflags);
		ZONE_LOCK(zone);
	}

	if (bucket == NULL)
		return (0);

#ifdef SMP
	/*
	 * This code is here to limit the number of simultaneous bucket
	 * fills for any given zone to the number of per cpu caches in this
	 * zone.  This is done so that we don't allocate more memory than
	 * we really need.
	 */
	if (zone->uz_fills >= mp_ncpus)
		goto done;

#endif
	zone->uz_fills++;

	max = MIN(bucket->ub_entries, zone->uz_count);
	/* Try to keep the buckets totally full */
	saved = bucket->ub_cnt;
	while (bucket->ub_cnt < max &&
	    (slab = uma_zone_slab(zone, flags)) != NULL) {
		while (slab->us_freecount && bucket->ub_cnt < max) {
			bucket->ub_bucket[bucket->ub_cnt++] =
			    uma_slab_alloc(zone, slab);
		}

		/* Don't block on the next fill */
		flags |= M_NOWAIT;
	}

	/*
	 * We unlock here because we need to call the zone's init.
	 * It should be safe to unlock because the slab dealt with
	 * above is already on the appropriate list within the keg
	 * and the bucket we filled is not yet on any list, so we
	 * own it.
	 */
	if (zone->uz_init != NULL) {
		int i;

		ZONE_UNLOCK(zone);
		for (i = saved; i < bucket->ub_cnt; i++)
			if (zone->uz_init(bucket->ub_bucket[i],
			    zone->uz_keg->uk_size, origflags) != 0)
				break;
		/*
		 * If we couldn't initialize the whole bucket, put the
		 * rest back onto the freelist.
		 */
		if (i != bucket->ub_cnt) {
			int j;

			for (j = i; j < bucket->ub_cnt; j++) {
				uma_zfree_internal(zone, bucket->ub_bucket[j],
				    NULL, SKIP_FINI);
#ifdef INVARIANTS
				bucket->ub_bucket[j] = NULL;
#endif
			}
			bucket->ub_cnt = i;
		}
		ZONE_LOCK(zone);
	}

	zone->uz_fills--;
	if (bucket->ub_cnt != 0) {
		LIST_INSERT_HEAD(&zone->uz_full_bucket,
		    bucket, ub_link);
		return (1);
	}
#ifdef SMP
done:
#endif
	bucket_free(bucket);

	return (0);
}

/*
 * Allocates an item for an internal zone
 *
 * Arguments
 *	zone   The zone to alloc for.
 *	udata  The data to be passed to the constructor.
 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
 *
 * Returns
 *	NULL if there is no memory and M_NOWAIT is set
 *	An item if successful
 */
static void *
uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
{
	uma_keg_t keg;
	uma_slab_t slab;
	void *item;

	item = NULL;
	keg = zone->uz_keg;

#ifdef UMA_DEBUG_ALLOC
	printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name,
	    zone);
#endif
	ZONE_LOCK(zone);

	slab = uma_zone_slab(zone, flags);
	if (slab == NULL) {
		ZONE_UNLOCK(zone);
		return (NULL);
	}

	item = uma_slab_alloc(zone, slab);

	zone->uz_allocs++;

	ZONE_UNLOCK(zone);

	/*
	 * We have to call both the zone's init (not the keg's init)
	 * and the zone's ctor.  This is because the item is going from
	 * a keg slab directly to the user, and the user is expecting it
	 * to be both zone-init'd as well as zone-ctor'd.
	 */
	if (zone->uz_init != NULL) {
		if (zone->uz_init(item, keg->uk_size, flags) != 0) {
			uma_zfree_internal(zone, item, udata, SKIP_FINI);
			return (NULL);
		}
	}
	if (zone->uz_ctor != NULL) {
		if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) {
			uma_zfree_internal(zone, item, udata, SKIP_DTOR);
			return (NULL);
		}
	}
	if (flags & M_ZERO)
		bzero(item, keg->uk_size);

	return (item);
}
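
/*
 * The init/ctor layering, summarized (per the comments above and in
 * keg_ctor()): the keg's uk_init runs once per item when a slab is
 * created (slab_zalloc), while the zone's uz_init and uz_ctor run per
 * handout.  For a hypothetical secondary zone that means:
 *
 *	slab fill:   uk_init(item)               (backing storage setup)
 *	allocation:  uz_init(item), uz_ctor(item)
 *	free:        uz_dtor(item); uz_fini runs only when the item
 *	             actually returns to the keg (e.g. at bucket drain)
 *
 * which is why uma_zalloc_internal() above must apply uz_init itself
 * when bypassing the bucket layer.
 */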

/* See uma.h */
void
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
	uma_keg_t keg;
	uma_cache_t cache;
	uma_bucket_t bucket;
	int bflags;
	int cpu;

	keg = zone->uz_keg;

#ifdef UMA_DEBUG_ALLOC_1
	printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
#endif
	CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
	    zone->uz_name);

	if (zone->uz_dtor)
		zone->uz_dtor(item, keg->uk_size, udata);
#ifdef INVARIANTS
	ZONE_LOCK(zone);
	if (keg->uk_flags & UMA_ZONE_MALLOC)
		uma_dbg_free(zone, udata, item);
	else
		uma_dbg_free(zone, NULL, item);
	ZONE_UNLOCK(zone);
#endif
	/*
	 * The race here is acceptable. If we miss it we'll just have to wait
	 * a little longer for the limits to be reset.
	 */
	if (keg->uk_flags & UMA_ZFLAG_FULL)
		goto zfree_internal;

	/*
	 * If possible, free to the per-CPU cache. There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses. We rely on a critical section to prevent
	 * preemption and migration. We release the critical section in
	 * order to acquire the zone mutex if we are unable to free to the
	 * current cache; when we re-acquire the critical section, we must
	 * detect and handle migration if it has occurred.
	 */
zfree_restart:
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

zfree_start:
	bucket = cache->uc_freebucket;

	if (bucket) {
		/*
		 * Do we have room in our bucket? It is OK for this uz count
		 * check to be slightly out of sync.
		 */

		if (bucket->ub_cnt < bucket->ub_entries) {
			KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
			    ("uma_zfree: Freeing to non free bucket index."));
			bucket->ub_bucket[bucket->ub_cnt] = item;
			bucket->ub_cnt++;
			cache->uc_frees++;
			critical_exit();
			return;
		} else if (cache->uc_allocbucket) {
#ifdef UMA_DEBUG_ALLOC
			printf("uma_zfree: Swapping buckets.\n");
#endif
			/*
			 * We have run out of space in our freebucket.
			 * See if we can switch with our alloc bucket.
			 */
			if (cache->uc_allocbucket->ub_cnt <
			    cache->uc_freebucket->ub_cnt) {
				bucket = cache->uc_freebucket;
				cache->uc_freebucket = cache->uc_allocbucket;
				cache->uc_allocbucket = bucket;
				goto zfree_start;
			}
		}
	}
	/*
	 * We can get here for two reasons:
	 *
	 * 1) The buckets are NULL
	 * 2) The alloc and free buckets are both somewhat full.
	 *
	 * We must go back to the zone, which requires acquiring the zone
	 * lock, which in turn means we must release and re-acquire the
	 * critical section.  Since the critical section is released, we may
	 * be preempted or migrate.  As such, make sure not to maintain any
	 * thread-local state specific to the cache from prior to releasing
	 * the critical section.
	 */
	critical_exit();
	ZONE_LOCK(zone);
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];
	if (cache->uc_freebucket != NULL) {
		if (cache->uc_freebucket->ub_cnt <
		    cache->uc_freebucket->ub_entries) {
			ZONE_UNLOCK(zone);
			goto zfree_start;
		}
		if (cache->uc_allocbucket != NULL &&
		    (cache->uc_allocbucket->ub_cnt <
		     cache->uc_freebucket->ub_cnt)) {
			ZONE_UNLOCK(zone);
			goto zfree_start;
		}
	}

	bucket = cache->uc_freebucket;
	cache->uc_freebucket = NULL;

	/* Can we throw this on the zone full list? */
	if (bucket != NULL) {
#ifdef UMA_DEBUG_ALLOC
		printf("uma_zfree: Putting old bucket on the free list.\n");
#endif
		/* ub_cnt is pointing to the last free item */
		KASSERT(bucket->ub_cnt != 0,
		    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
		LIST_INSERT_HEAD(&zone->uz_full_bucket,
		    bucket, ub_link);
	}
	if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
		LIST_REMOVE(bucket, ub_link);
		ZONE_UNLOCK(zone);
		cache->uc_freebucket = bucket;
		goto zfree_start;
	}
	/* We are no longer associated with this CPU. */
	critical_exit();

	/* And the zone.. */
	ZONE_UNLOCK(zone);

#ifdef UMA_DEBUG_ALLOC
	printf("uma_zfree: Allocating new free bucket.\n");
#endif
	bflags = M_NOWAIT;

	if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
		bflags |= M_NOVM;
	bucket = bucket_alloc(zone->uz_count, bflags);
	if (bucket) {
		ZONE_LOCK(zone);
		LIST_INSERT_HEAD(&zone->uz_free_bucket,
		    bucket, ub_link);
		ZONE_UNLOCK(zone);
		goto zfree_restart;
	}

	/*
	 * If nothing else caught this, we'll just do an internal free.
	 */
zfree_internal:
	uma_zfree_internal(zone, item, udata, SKIP_DTOR);

	return;
}
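
/*
 * Illustrative sketch (not part of UMA): uma_zfree_arg() above re-reads
 * curcpu after every critical_exit()/critical_enter() pair because the
 * thread may have been preempted and migrated while outside the critical
 * section.  The skeleton below shows that idiom in isolation; the
 * UMA_EXAMPLES guard and toy_* names are invented for exposition.
 */
#ifdef UMA_EXAMPLES
static long toy_pcpu_count[MAXCPU];

static void
toy_pcpu_incr(void)
{
	long *cnt;

	critical_enter();
	/* Safe: neither preemption nor migration can occur here. */
	cnt = &toy_pcpu_count[curcpu];
	(*cnt)++;
	critical_exit();
	/*
	 * Once the critical section is released the thread may resume on
	 * another CPU, so 'cnt' is stale and must not be dereferenced;
	 * re-enter the critical section and re-evaluate curcpu instead.
	 */
}
#endif	/* UMA_EXAMPLES */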

/*
 * Frees an item to an INTERNAL zone or allocates a free bucket
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 */
static void
uma_zfree_internal(uma_zone_t zone, void *item, void *udata,
    enum zfreeskip skip)
{
	uma_slab_t slab;
	uma_slabrefcnt_t slabref;
	uma_keg_t keg;
	u_int8_t *mem;
	u_int8_t freei;

	keg = zone->uz_keg;

	if (skip < SKIP_DTOR && zone->uz_dtor)
		zone->uz_dtor(item, keg->uk_size, udata);
	if (skip < SKIP_FINI && zone->uz_fini)
		zone->uz_fini(item, keg->uk_size);

	ZONE_LOCK(zone);

	if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
		mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
		if (keg->uk_flags & UMA_ZONE_HASH)
			slab = hash_sfind(&keg->uk_hash, mem);
		else {
			mem += keg->uk_pgoff;
			slab = (uma_slab_t)mem;
		}
	} else {
		slab = (uma_slab_t)udata;
	}

	/* Do we need to remove from any lists? */
	if (slab->us_freecount + 1 == keg->uk_ipers) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
	} else if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
	}

	/* Slab management stuff */
	freei = ((unsigned long)item - (unsigned long)slab->us_data)
	    / keg->uk_rsize;

#ifdef INVARIANTS
	if (!skip)
		uma_dbg_free(zone, slab, item);
#endif

	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		slabref->us_freelist[freei].us_item = slab->us_firstfree;
	} else {
		slab->us_freelist[freei].us_item = slab->us_firstfree;
	}
	slab->us_firstfree = freei;
	slab->us_freecount++;

	/* Zone statistics */
	keg->uk_free++;
	zone->uz_frees++;

	if (keg->uk_flags & UMA_ZFLAG_FULL) {
		if (keg->uk_pages < keg->uk_maxpages)
			keg->uk_flags &= ~UMA_ZFLAG_FULL;

		/* We can handle one more allocation */
		wakeup_one(keg);
	}

	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_max(uma_zone_t zone, int nitems)
{
	uma_keg_t keg;

	keg = zone->uz_keg;
	ZONE_LOCK(zone);
	if (keg->uk_ppera > 1)
		keg->uk_maxpages = nitems * keg->uk_ppera;
	else
		keg->uk_maxpages = nitems / keg->uk_ipers;

	if (keg->uk_maxpages * keg->uk_ipers < nitems)
		keg->uk_maxpages++;

	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
	ZONE_LOCK(zone);
	KASSERT(zone->uz_keg->uk_pages == 0,
	    ("uma_zone_set_init on non-empty keg"));
	zone->uz_keg->uk_init = uminit;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
	ZONE_LOCK(zone);
	KASSERT(zone->uz_keg->uk_pages == 0,
	    ("uma_zone_set_fini on non-empty keg"));
	zone->uz_keg->uk_fini = fini;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
{
	ZONE_LOCK(zone);
	KASSERT(zone->uz_keg->uk_pages == 0,
	    ("uma_zone_set_zinit on non-empty keg"));
	zone->uz_init = zinit;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
{
	ZONE_LOCK(zone);
	KASSERT(zone->uz_keg->uk_pages == 0,
	    ("uma_zone_set_zfini on non-empty keg"));
	zone->uz_fini = zfini;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_freef is not actually used with the zone locked */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
	ZONE_LOCK(zone);
	zone->uz_keg->uk_freef = freef;
	ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_allocf is not actually used with the zone locked */
void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
	ZONE_LOCK(zone);
	zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
	zone->uz_keg->uk_allocf = allocf;
	ZONE_UNLOCK(zone);
}
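
/*
 * Illustrative sketch (not part of UMA): uma_zone_set_max() above stores
 * its limit in pages, so an item limit is rounded up to whole slabs.
 * For single-page slabs with uk_ipers = 40, nitems = 100 becomes
 * 100 / 40 = 2 pages, and 2 * 40 < 100 bumps that to 3 pages, i.e. an
 * effective cap of 120 items.  The helper below redoes that arithmetic;
 * the UMA_EXAMPLES guard and the helper name are invented.
 */
#ifdef UMA_EXAMPLES
static int
toy_items_to_pages(int nitems, int ipers, int ppera)
{
	int pages;

	if (ppera > 1)			/* multi-page slabs */
		pages = nitems * ppera;
	else
		pages = nitems / ipers;
	if (pages * ipers < nitems)	/* round up to a whole slab */
		pages++;
	return (pages);
}
#endif	/* UMA_EXAMPLES */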

/* See uma.h */
int
uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
{
	uma_keg_t keg;
	vm_offset_t kva;
	int pages;

	keg = zone->uz_keg;
	pages = count / keg->uk_ipers;

	if (pages * keg->uk_ipers < count)
		pages++;

	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);

	if (kva == 0)
		return (0);
	if (obj == NULL) {
		obj = vm_object_allocate(OBJT_DEFAULT,
		    pages);
	} else {
		VM_OBJECT_LOCK_INIT(obj, "uma object");
		_vm_object_allocate(OBJT_DEFAULT,
		    pages, obj);
	}
	ZONE_LOCK(zone);
	keg->uk_kva = kva;
	keg->uk_obj = obj;
	keg->uk_maxpages = pages;
	keg->uk_allocf = obj_alloc;
	keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
	ZONE_UNLOCK(zone);
	return (1);
}

/* See uma.h */
void
uma_prealloc(uma_zone_t zone, int items)
{
	int slabs;
	uma_slab_t slab;
	uma_keg_t keg;

	keg = zone->uz_keg;
	ZONE_LOCK(zone);
	slabs = items / keg->uk_ipers;
	if (slabs * keg->uk_ipers < items)
		slabs++;
	while (slabs > 0) {
		slab = slab_zalloc(zone, M_WAITOK);
		LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
		slabs--;
	}
	ZONE_UNLOCK(zone);
}

/* See uma.h */
u_int32_t *
uma_find_refcnt(uma_zone_t zone, void *item)
{
	uma_slabrefcnt_t slabref;
	uma_keg_t keg;
	u_int32_t *refcnt;
	int idx;

	keg = zone->uz_keg;
	slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
	    (~UMA_SLAB_MASK));
	KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
	    ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
	idx = ((unsigned long)item - (unsigned long)slabref->us_data)
	    / keg->uk_rsize;
	refcnt = &slabref->us_freelist[idx].us_refcnt;
	return (refcnt);
}
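
/*
 * Illustrative sketch (not part of UMA): uma_find_refcnt() above, like
 * uma_zfree_internal(), recovers an item's slab by masking the item
 * address down to its slab base, and the item's index by dividing its
 * offset within the slab by the real item size.  The helpers below show
 * only the arithmetic; the UMA_EXAMPLES guard and toy_* names are
 * invented for exposition.
 */
#ifdef UMA_EXAMPLES
static void *
toy_item_to_slabbase(void *item)
{
	/* Round the address down to a slab (page) boundary. */
	return ((void *)((unsigned long)item & ~(unsigned long)UMA_SLAB_MASK));
}

static int
toy_item_index(void *item, void *slab_data, int rsize)
{
	/* Offset within the slab, in units of the real item size. */
	return ((int)(((unsigned long)item - (unsigned long)slab_data) /
	    rsize));
}
#endif	/* UMA_EXAMPLES */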
2661 */ 2662 zone_drain(slabzone); 2663 zone_drain(slabrefzone); 2664 bucket_zone_drain(); 2665} 2666 2667void * 2668uma_large_malloc(int size, int wait) 2669{ 2670 void *mem; 2671 uma_slab_t slab; 2672 u_int8_t flags; 2673 2674 slab = uma_zalloc_internal(slabzone, NULL, wait); 2675 if (slab == NULL) 2676 return (NULL); 2677 mem = page_alloc(NULL, size, &flags, wait); 2678 if (mem) { 2679 vsetslab((vm_offset_t)mem, slab); 2680 slab->us_data = mem; 2681 slab->us_flags = flags | UMA_SLAB_MALLOC; 2682 slab->us_size = size; 2683 } else { 2684 uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE); 2685 } 2686 2687 return (mem); 2688} 2689 2690void 2691uma_large_free(uma_slab_t slab) 2692{ 2693 vsetobj((vm_offset_t)slab->us_data, kmem_object); 2694 page_free(slab->us_data, slab->us_size, slab->us_flags); 2695 uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE); 2696} 2697 2698void 2699uma_print_stats(void) 2700{ 2701 zone_foreach(uma_print_zone); 2702} 2703 2704static void 2705slab_print(uma_slab_t slab) 2706{ 2707 printf("slab: keg %p, data %p, freecount %d, firstfree %d\n", 2708 slab->us_keg, slab->us_data, slab->us_freecount, 2709 slab->us_firstfree); 2710} 2711 2712static void 2713cache_print(uma_cache_t cache) 2714{ 2715 printf("alloc: %p(%d), free: %p(%d)\n", 2716 cache->uc_allocbucket, 2717 cache->uc_allocbucket?cache->uc_allocbucket->ub_cnt:0, 2718 cache->uc_freebucket, 2719 cache->uc_freebucket?cache->uc_freebucket->ub_cnt:0); 2720} 2721 2722void 2723uma_print_zone(uma_zone_t zone) 2724{ 2725 uma_cache_t cache; 2726 uma_keg_t keg; 2727 uma_slab_t slab; 2728 int i; 2729 2730 keg = zone->uz_keg; 2731 printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n", 2732 zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags, 2733 keg->uk_ipers, keg->uk_ppera, 2734 (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free); 2735 printf("Part slabs:\n"); 2736 LIST_FOREACH(slab, &keg->uk_part_slab, us_link) 2737 slab_print(slab); 2738 printf("Free slabs:\n"); 2739 LIST_FOREACH(slab, &keg->uk_free_slab, us_link) 2740 slab_print(slab); 2741 printf("Full slabs:\n"); 2742 LIST_FOREACH(slab, &keg->uk_full_slab, us_link) 2743 slab_print(slab); 2744 for (i = 0; i <= mp_maxid; i++) { 2745 if (CPU_ABSENT(i)) 2746 continue; 2747 cache = &zone->uz_cpu[i]; 2748 printf("CPU %d Cache:\n", i); 2749 cache_print(cache); 2750 } 2751} 2752 2753/*
| 260/* 261 * This routine checks to see whether or not it's safe to enable buckets. 262 */ 263 264static void 265bucket_enable(void) 266{ 267 if (cnt.v_free_count < cnt.v_free_min) 268 bucketdisable = 1; 269 else 270 bucketdisable = 0; 271} 272 273/* 274 * Initialize bucket_zones, the array of zones of buckets of various sizes. 275 * 276 * For each zone, calculate the memory required for each bucket, consisting 277 * of the header and an array of pointers. Initialize bucket_size[] to point 278 * the range of appropriate bucket sizes at the zone. 279 */ 280static void 281bucket_init(void) 282{ 283 struct uma_bucket_zone *ubz; 284 int i; 285 int j; 286 287 for (i = 0, j = 0; bucket_zones[j].ubz_entries != 0; j++) { 288 int size; 289 290 ubz = &bucket_zones[j]; 291 size = roundup(sizeof(struct uma_bucket), sizeof(void *)); 292 size += sizeof(void *) * ubz->ubz_entries; 293 ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size, 294 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 295 for (; i <= ubz->ubz_entries; i += (1 << BUCKET_SHIFT)) 296 bucket_size[i >> BUCKET_SHIFT] = j; 297 } 298} 299 300/* 301 * Given a desired number of entries for a bucket, return the zone from which 302 * to allocate the bucket. 303 */ 304static struct uma_bucket_zone * 305bucket_zone_lookup(int entries) 306{ 307 int idx; 308 309 idx = howmany(entries, 1 << BUCKET_SHIFT); 310 return (&bucket_zones[bucket_size[idx]]); 311} 312 313static uma_bucket_t 314bucket_alloc(int entries, int bflags) 315{ 316 struct uma_bucket_zone *ubz; 317 uma_bucket_t bucket; 318 319 /* 320 * This is to stop us from allocating per cpu buckets while we're 321 * running out of UMA_BOOT_PAGES. Otherwise, we would exhaust the 322 * boot pages. This also prevents us from allocating buckets in 323 * low memory situations. 324 */ 325 if (bucketdisable) 326 return (NULL); 327 328 ubz = bucket_zone_lookup(entries); 329 bucket = uma_zalloc_internal(ubz->ubz_zone, NULL, bflags); 330 if (bucket) { 331#ifdef INVARIANTS 332 bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries); 333#endif 334 bucket->ub_cnt = 0; 335 bucket->ub_entries = ubz->ubz_entries; 336 } 337 338 return (bucket); 339} 340 341static void 342bucket_free(uma_bucket_t bucket) 343{ 344 struct uma_bucket_zone *ubz; 345 346 ubz = bucket_zone_lookup(bucket->ub_entries); 347 uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE); 348} 349 350static void 351bucket_zone_drain(void) 352{ 353 struct uma_bucket_zone *ubz; 354 355 for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) 356 zone_drain(ubz->ubz_zone); 357} 358 359 360/* 361 * Routine called by timeout which is used to fire off some time interval 362 * based calculations. (stats, hash size, etc.) 363 * 364 * Arguments: 365 * arg Unused 366 * 367 * Returns: 368 * Nothing 369 */ 370static void 371uma_timeout(void *unused) 372{ 373 bucket_enable(); 374 zone_foreach(zone_timeout); 375 376 /* Reschedule this event */ 377 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 378} 379 380/* 381 * Routine to perform timeout driven calculations. This expands the 382 * hashes and does per cpu statistics aggregation. 383 * 384 * Arguments: 385 * zone The zone to operate on 386 * 387 * Returns: 388 * Nothing 389 */ 390static void 391zone_timeout(uma_zone_t zone) 392{ 393 uma_keg_t keg; 394 u_int64_t alloc; 395 396 keg = zone->uz_keg; 397 alloc = 0; 398 399 /* 400 * Expand the zone hash table. 401 * 402 * This is done if the number of slabs is larger than the hash size. 
403 * What I'm trying to do here is completely reduce collisions. This 404 * may be a little aggressive. Should I allow for two collisions max? 405 */ 406 ZONE_LOCK(zone); 407 if (keg->uk_flags & UMA_ZONE_HASH && 408 keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) { 409 struct uma_hash newhash; 410 struct uma_hash oldhash; 411 int ret; 412 413 /* 414 * This is so involved because allocating and freeing 415 * while the zone lock is held will lead to deadlock. 416 * I have to do everything in stages and check for 417 * races. 418 */ 419 newhash = keg->uk_hash; 420 ZONE_UNLOCK(zone); 421 ret = hash_alloc(&newhash); 422 ZONE_LOCK(zone); 423 if (ret) { 424 if (hash_expand(&keg->uk_hash, &newhash)) { 425 oldhash = keg->uk_hash; 426 keg->uk_hash = newhash; 427 } else 428 oldhash = newhash; 429 430 ZONE_UNLOCK(zone); 431 hash_free(&oldhash); 432 ZONE_LOCK(zone); 433 } 434 } 435 ZONE_UNLOCK(zone); 436} 437 438/* 439 * Allocate and zero fill the next sized hash table from the appropriate 440 * backing store. 441 * 442 * Arguments: 443 * hash A new hash structure with the old hash size in uh_hashsize 444 * 445 * Returns: 446 * 1 on sucess and 0 on failure. 447 */ 448static int 449hash_alloc(struct uma_hash *hash) 450{ 451 int oldsize; 452 int alloc; 453 454 oldsize = hash->uh_hashsize; 455 456 /* We're just going to go to a power of two greater */ 457 if (oldsize) { 458 hash->uh_hashsize = oldsize * 2; 459 alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize; 460 hash->uh_slab_hash = (struct slabhead *)malloc(alloc, 461 M_UMAHASH, M_NOWAIT); 462 } else { 463 alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT; 464 hash->uh_slab_hash = uma_zalloc_internal(hashzone, NULL, 465 M_WAITOK); 466 hash->uh_hashsize = UMA_HASH_SIZE_INIT; 467 } 468 if (hash->uh_slab_hash) { 469 bzero(hash->uh_slab_hash, alloc); 470 hash->uh_hashmask = hash->uh_hashsize - 1; 471 return (1); 472 } 473 474 return (0); 475} 476 477/* 478 * Expands the hash table for HASH zones. This is done from zone_timeout 479 * to reduce collisions. This must not be done in the regular allocation 480 * path, otherwise, we can recurse on the vm while allocating pages. 481 * 482 * Arguments: 483 * oldhash The hash you want to expand 484 * newhash The hash structure for the new table 485 * 486 * Returns: 487 * Nothing 488 * 489 * Discussion: 490 */ 491static int 492hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash) 493{ 494 uma_slab_t slab; 495 int hval; 496 int i; 497 498 if (!newhash->uh_slab_hash) 499 return (0); 500 501 if (oldhash->uh_hashsize >= newhash->uh_hashsize) 502 return (0); 503 504 /* 505 * I need to investigate hash algorithms for resizing without a 506 * full rehash. 507 */ 508 509 for (i = 0; i < oldhash->uh_hashsize; i++) 510 while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) { 511 slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]); 512 SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink); 513 hval = UMA_HASH(newhash, slab->us_data); 514 SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval], 515 slab, us_hlink); 516 } 517 518 return (1); 519} 520 521/* 522 * Free the hash bucket to the appropriate backing store. 
523 * 524 * Arguments: 525 * slab_hash The hash bucket we're freeing 526 * hashsize The number of entries in that hash bucket 527 * 528 * Returns: 529 * Nothing 530 */ 531static void 532hash_free(struct uma_hash *hash) 533{ 534 if (hash->uh_slab_hash == NULL) 535 return; 536 if (hash->uh_hashsize == UMA_HASH_SIZE_INIT) 537 uma_zfree_internal(hashzone, 538 hash->uh_slab_hash, NULL, SKIP_NONE); 539 else 540 free(hash->uh_slab_hash, M_UMAHASH); 541} 542 543/* 544 * Frees all outstanding items in a bucket 545 * 546 * Arguments: 547 * zone The zone to free to, must be unlocked. 548 * bucket The free/alloc bucket with items, cpu queue must be locked. 549 * 550 * Returns: 551 * Nothing 552 */ 553 554static void 555bucket_drain(uma_zone_t zone, uma_bucket_t bucket) 556{ 557 uma_slab_t slab; 558 int mzone; 559 void *item; 560 561 if (bucket == NULL) 562 return; 563 564 slab = NULL; 565 mzone = 0; 566 567 /* We have to lookup the slab again for malloc.. */ 568 if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC) 569 mzone = 1; 570 571 while (bucket->ub_cnt > 0) { 572 bucket->ub_cnt--; 573 item = bucket->ub_bucket[bucket->ub_cnt]; 574#ifdef INVARIANTS 575 bucket->ub_bucket[bucket->ub_cnt] = NULL; 576 KASSERT(item != NULL, 577 ("bucket_drain: botched ptr, item is NULL")); 578#endif 579 /* 580 * This is extremely inefficient. The slab pointer was passed 581 * to uma_zfree_arg, but we lost it because the buckets don't 582 * hold them. This will go away when free() gets a size passed 583 * to it. 584 */ 585 if (mzone) 586 slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK)); 587 uma_zfree_internal(zone, item, slab, SKIP_DTOR); 588 } 589} 590 591/* 592 * Drains the per cpu caches for a zone. 593 * 594 * NOTE: This may only be called while the zone is being turn down, and not 595 * during normal operation. This is necessary in order that we do not have 596 * to migrate CPUs to drain the per-CPU caches. 597 * 598 * Arguments: 599 * zone The zone to drain, must be unlocked. 600 * 601 * Returns: 602 * Nothing 603 */ 604static void 605cache_drain(uma_zone_t zone) 606{ 607 uma_cache_t cache; 608 int cpu; 609 610 /* 611 * XXX: It is safe to not lock the per-CPU caches, because we're 612 * tearing down the zone anyway. I.e., there will be no further use 613 * of the caches at this point. 614 * 615 * XXX: It would good to be able to assert that the zone is being 616 * torn down to prevent improper use of cache_drain(). 617 * 618 * XXX: We lock the zone before passing into bucket_cache_drain() as 619 * it is used elsewhere. Should the tear-down path be made special 620 * there in some form? 621 */ 622 for (cpu = 0; cpu <= mp_maxid; cpu++) { 623 if (CPU_ABSENT(cpu)) 624 continue; 625 cache = &zone->uz_cpu[cpu]; 626 bucket_drain(zone, cache->uc_allocbucket); 627 bucket_drain(zone, cache->uc_freebucket); 628 if (cache->uc_allocbucket != NULL) 629 bucket_free(cache->uc_allocbucket); 630 if (cache->uc_freebucket != NULL) 631 bucket_free(cache->uc_freebucket); 632 cache->uc_allocbucket = cache->uc_freebucket = NULL; 633 } 634 ZONE_LOCK(zone); 635 bucket_cache_drain(zone); 636 ZONE_UNLOCK(zone); 637} 638 639/* 640 * Drain the cached buckets from a zone. Expects a locked zone on entry. 641 */ 642static void 643bucket_cache_drain(uma_zone_t zone) 644{ 645 uma_bucket_t bucket; 646 647 /* 648 * Drain the bucket queues and free the buckets, we just keep two per 649 * cpu (alloc/free). 
650 */ 651 while ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) { 652 LIST_REMOVE(bucket, ub_link); 653 ZONE_UNLOCK(zone); 654 bucket_drain(zone, bucket); 655 bucket_free(bucket); 656 ZONE_LOCK(zone); 657 } 658 659 /* Now we do the free queue.. */ 660 while ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) { 661 LIST_REMOVE(bucket, ub_link); 662 bucket_free(bucket); 663 } 664} 665 666/* 667 * Frees pages from a zone back to the system. This is done on demand from 668 * the pageout daemon. 669 * 670 * Arguments: 671 * zone The zone to free pages from 672 * all Should we drain all items? 673 * 674 * Returns: 675 * Nothing. 676 */ 677static void 678zone_drain(uma_zone_t zone) 679{ 680 struct slabhead freeslabs = { 0 }; 681 uma_keg_t keg; 682 uma_slab_t slab; 683 uma_slab_t n; 684 u_int8_t flags; 685 u_int8_t *mem; 686 int i; 687 688 keg = zone->uz_keg; 689 690 /* 691 * We don't want to take pages from statically allocated zones at this 692 * time 693 */ 694 if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL) 695 return; 696 697 ZONE_LOCK(zone); 698 699#ifdef UMA_DEBUG 700 printf("%s free items: %u\n", zone->uz_name, keg->uk_free); 701#endif 702 bucket_cache_drain(zone); 703 if (keg->uk_free == 0) 704 goto finished; 705 706 slab = LIST_FIRST(&keg->uk_free_slab); 707 while (slab) { 708 n = LIST_NEXT(slab, us_link); 709 710 /* We have no where to free these to */ 711 if (slab->us_flags & UMA_SLAB_BOOT) { 712 slab = n; 713 continue; 714 } 715 716 LIST_REMOVE(slab, us_link); 717 keg->uk_pages -= keg->uk_ppera; 718 keg->uk_free -= keg->uk_ipers; 719 720 if (keg->uk_flags & UMA_ZONE_HASH) 721 UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data); 722 723 SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink); 724 725 slab = n; 726 } 727finished: 728 ZONE_UNLOCK(zone); 729 730 while ((slab = SLIST_FIRST(&freeslabs)) != NULL) { 731 SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink); 732 if (keg->uk_fini) 733 for (i = 0; i < keg->uk_ipers; i++) 734 keg->uk_fini( 735 slab->us_data + (keg->uk_rsize * i), 736 keg->uk_size); 737 flags = slab->us_flags; 738 mem = slab->us_data; 739 740 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 741 (keg->uk_flags & UMA_ZONE_REFCNT)) { 742 vm_object_t obj; 743 744 if (flags & UMA_SLAB_KMEM) 745 obj = kmem_object; 746 else 747 obj = NULL; 748 for (i = 0; i < keg->uk_ppera; i++) 749 vsetobj((vm_offset_t)mem + (i * PAGE_SIZE), 750 obj); 751 } 752 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 753 uma_zfree_internal(keg->uk_slabzone, slab, NULL, 754 SKIP_NONE); 755#ifdef UMA_DEBUG 756 printf("%s: Returning %d bytes.\n", 757 zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera); 758#endif 759 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags); 760 } 761} 762 763/* 764 * Allocate a new slab for a zone. This does not insert the slab onto a list. 765 * 766 * Arguments: 767 * zone The zone to allocate slabs for 768 * wait Shall we wait? 769 * 770 * Returns: 771 * The slab that was allocated or NULL if there is no memory and the 772 * caller specified M_NOWAIT. 
773 */ 774static uma_slab_t 775slab_zalloc(uma_zone_t zone, int wait) 776{ 777 uma_slabrefcnt_t slabref; 778 uma_slab_t slab; 779 uma_keg_t keg; 780 u_int8_t *mem; 781 u_int8_t flags; 782 int i; 783 784 slab = NULL; 785 keg = zone->uz_keg; 786 787#ifdef UMA_DEBUG 788 printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name); 789#endif 790 ZONE_UNLOCK(zone); 791 792 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 793 slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait); 794 if (slab == NULL) { 795 ZONE_LOCK(zone); 796 return NULL; 797 } 798 } 799 800 /* 801 * This reproduces the old vm_zone behavior of zero filling pages the 802 * first time they are added to a zone. 803 * 804 * Malloced items are zeroed in uma_zalloc. 805 */ 806 807 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 808 wait |= M_ZERO; 809 else 810 wait &= ~M_ZERO; 811 812 mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, 813 &flags, wait); 814 if (mem == NULL) { 815 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 816 uma_zfree_internal(keg->uk_slabzone, slab, NULL, 817 SKIP_NONE); 818 ZONE_LOCK(zone); 819 return (NULL); 820 } 821 822 /* Point the slab into the allocated memory */ 823 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) 824 slab = (uma_slab_t )(mem + keg->uk_pgoff); 825 826 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 827 (keg->uk_flags & UMA_ZONE_REFCNT)) 828 for (i = 0; i < keg->uk_ppera; i++) 829 vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab); 830 831 slab->us_keg = keg; 832 slab->us_data = mem; 833 slab->us_freecount = keg->uk_ipers; 834 slab->us_firstfree = 0; 835 slab->us_flags = flags; 836 837 if (keg->uk_flags & UMA_ZONE_REFCNT) { 838 slabref = (uma_slabrefcnt_t)slab; 839 for (i = 0; i < keg->uk_ipers; i++) { 840 slabref->us_freelist[i].us_refcnt = 0; 841 slabref->us_freelist[i].us_item = i+1; 842 } 843 } else { 844 for (i = 0; i < keg->uk_ipers; i++) 845 slab->us_freelist[i].us_item = i+1; 846 } 847 848 if (keg->uk_init != NULL) { 849 for (i = 0; i < keg->uk_ipers; i++) 850 if (keg->uk_init(slab->us_data + (keg->uk_rsize * i), 851 keg->uk_size, wait) != 0) 852 break; 853 if (i != keg->uk_ipers) { 854 if (keg->uk_fini != NULL) { 855 for (i--; i > -1; i--) 856 keg->uk_fini(slab->us_data + 857 (keg->uk_rsize * i), 858 keg->uk_size); 859 } 860 if ((keg->uk_flags & UMA_ZONE_MALLOC) || 861 (keg->uk_flags & UMA_ZONE_REFCNT)) { 862 vm_object_t obj; 863 864 if (flags & UMA_SLAB_KMEM) 865 obj = kmem_object; 866 else 867 obj = NULL; 868 for (i = 0; i < keg->uk_ppera; i++) 869 vsetobj((vm_offset_t)mem + 870 (i * PAGE_SIZE), obj); 871 } 872 if (keg->uk_flags & UMA_ZONE_OFFPAGE) 873 uma_zfree_internal(keg->uk_slabzone, slab, 874 NULL, SKIP_NONE); 875 keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, 876 flags); 877 ZONE_LOCK(zone); 878 return (NULL); 879 } 880 } 881 ZONE_LOCK(zone); 882 883 if (keg->uk_flags & UMA_ZONE_HASH) 884 UMA_HASH_INSERT(&keg->uk_hash, slab, mem); 885 886 keg->uk_pages += keg->uk_ppera; 887 keg->uk_free += keg->uk_ipers; 888 889 return (slab); 890} 891 892/* 893 * This function is intended to be used early on in place of page_alloc() so 894 * that we may use the boot time page cache to satisfy allocations before 895 * the VM is ready. 896 */ 897static void * 898startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 899{ 900 uma_keg_t keg; 901 902 keg = zone->uz_keg; 903 904 /* 905 * Check our small startup cache to see if it has pages remaining. 
906 */ 907 mtx_lock(&uma_mtx); 908 if (uma_boot_free != 0) { 909 uma_slab_t tmps; 910 911 tmps = LIST_FIRST(&uma_boot_pages); 912 LIST_REMOVE(tmps, us_link); 913 uma_boot_free--; 914 mtx_unlock(&uma_mtx); 915 *pflag = tmps->us_flags; 916 return (tmps->us_data); 917 } 918 mtx_unlock(&uma_mtx); 919 if (booted == 0) 920 panic("UMA: Increase UMA_BOOT_PAGES"); 921 /* 922 * Now that we've booted reset these users to their real allocator. 923 */ 924#ifdef UMA_MD_SMALL_ALLOC 925 keg->uk_allocf = uma_small_alloc; 926#else 927 keg->uk_allocf = page_alloc; 928#endif 929 return keg->uk_allocf(zone, bytes, pflag, wait); 930} 931 932/* 933 * Allocates a number of pages from the system 934 * 935 * Arguments: 936 * zone Unused 937 * bytes The number of bytes requested 938 * wait Shall we wait? 939 * 940 * Returns: 941 * A pointer to the alloced memory or possibly 942 * NULL if M_NOWAIT is set. 943 */ 944static void * 945page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait) 946{ 947 void *p; /* Returned page */ 948 949 *pflag = UMA_SLAB_KMEM; 950 p = (void *) kmem_malloc(kmem_map, bytes, wait); 951 952 return (p); 953} 954 955/* 956 * Allocates a number of pages from within an object 957 * 958 * Arguments: 959 * zone Unused 960 * bytes The number of bytes requested 961 * wait Shall we wait? 962 * 963 * Returns: 964 * A pointer to the alloced memory or possibly 965 * NULL if M_NOWAIT is set. 966 */ 967static void * 968obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 969{ 970 vm_object_t object; 971 vm_offset_t retkva, zkva; 972 vm_page_t p; 973 int pages, startpages; 974 975 object = zone->uz_keg->uk_obj; 976 retkva = 0; 977 978 /* 979 * This looks a little weird since we're getting one page at a time. 980 */ 981 VM_OBJECT_LOCK(object); 982 p = TAILQ_LAST(&object->memq, pglist); 983 pages = p != NULL ? p->pindex + 1 : 0; 984 startpages = pages; 985 zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE; 986 for (; bytes > 0; bytes -= PAGE_SIZE) { 987 p = vm_page_alloc(object, pages, 988 VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED); 989 if (p == NULL) { 990 if (pages != startpages) 991 pmap_qremove(retkva, pages - startpages); 992 while (pages != startpages) { 993 pages--; 994 p = TAILQ_LAST(&object->memq, pglist); 995 vm_page_lock_queues(); 996 vm_page_unwire(p, 0); 997 vm_page_free(p); 998 vm_page_unlock_queues(); 999 } 1000 retkva = 0; 1001 goto done; 1002 } 1003 pmap_qenter(zkva, &p, 1); 1004 if (retkva == 0) 1005 retkva = zkva; 1006 zkva += PAGE_SIZE; 1007 pages += 1; 1008 } 1009done: 1010 VM_OBJECT_UNLOCK(object); 1011 *flags = UMA_SLAB_PRIV; 1012 1013 return ((void *)retkva); 1014} 1015 1016/* 1017 * Frees a number of pages to the system 1018 * 1019 * Arguments: 1020 * mem A pointer to the memory to be freed 1021 * size The size of the memory being freed 1022 * flags The original p->us_flags field 1023 * 1024 * Returns: 1025 * Nothing 1026 */ 1027static void 1028page_free(void *mem, int size, u_int8_t flags) 1029{ 1030 vm_map_t map; 1031 1032 if (flags & UMA_SLAB_KMEM) 1033 map = kmem_map; 1034 else 1035 panic("UMA: page_free used with invalid flags %d\n", flags); 1036 1037 kmem_free(map, (vm_offset_t)mem, size); 1038} 1039 1040/* 1041 * Zero fill initializer 1042 * 1043 * Arguments/Returns follow uma_init specifications 1044 */ 1045static int 1046zero_init(void *mem, int size, int flags) 1047{ 1048 bzero(mem, size); 1049 return (0); 1050} 1051 1052/* 1053 * Finish creating a small uma zone. This calculates ipers, and the zone size. 
1054 * 1055 * Arguments 1056 * zone The zone we should initialize 1057 * 1058 * Returns 1059 * Nothing 1060 */ 1061static void 1062zone_small_init(uma_zone_t zone) 1063{ 1064 uma_keg_t keg; 1065 u_int rsize; 1066 u_int memused; 1067 u_int wastedspace; 1068 u_int shsize; 1069 1070 keg = zone->uz_keg; 1071 KASSERT(keg != NULL, ("Keg is null in zone_small_init")); 1072 rsize = keg->uk_size; 1073 1074 if (rsize < UMA_SMALLEST_UNIT) 1075 rsize = UMA_SMALLEST_UNIT; 1076 if (rsize & keg->uk_align) 1077 rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1); 1078 1079 keg->uk_rsize = rsize; 1080 keg->uk_ppera = 1; 1081 1082 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1083 rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */ 1084 shsize = sizeof(struct uma_slab_refcnt); 1085 } else { 1086 rsize += UMA_FRITM_SZ; /* Account for linkage */ 1087 shsize = sizeof(struct uma_slab); 1088 } 1089 1090 keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize; 1091 KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0")); 1092 memused = keg->uk_ipers * rsize + shsize; 1093 wastedspace = UMA_SLAB_SIZE - memused; 1094 1095 /* 1096 * We can't do OFFPAGE if we're internal or if we've been 1097 * asked to not go to the VM for buckets. If we do this we 1098 * may end up going to the VM (kmem_map) for slabs which we 1099 * do not want to do if we're UMA_ZFLAG_CACHEONLY as a 1100 * result of UMA_ZONE_VM, which clearly forbids it. 1101 */ 1102 if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) || 1103 (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) 1104 return; 1105 1106 if ((wastedspace >= UMA_MAX_WASTE) && 1107 (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) { 1108 keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize; 1109 KASSERT(keg->uk_ipers <= 255, 1110 ("zone_small_init: keg->uk_ipers too high!")); 1111#ifdef UMA_DEBUG 1112 printf("UMA decided we need offpage slab headers for " 1113 "zone: %s, calculated wastedspace = %d, " 1114 "maximum wasted space allowed = %d, " 1115 "calculated ipers = %d, " 1116 "new wasted space = %d\n", zone->uz_name, wastedspace, 1117 UMA_MAX_WASTE, keg->uk_ipers, 1118 UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize); 1119#endif 1120 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1121 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1122 keg->uk_flags |= UMA_ZONE_HASH; 1123 } 1124} 1125 1126/* 1127 * Finish creating a large (> UMA_SLAB_SIZE) uma zone. Just give in and do 1128 * OFFPAGE for now. When I can allow for more dynamic slab sizes this will be 1129 * more complicated. 1130 * 1131 * Arguments 1132 * zone The zone we should initialize 1133 * 1134 * Returns 1135 * Nothing 1136 */ 1137static void 1138zone_large_init(uma_zone_t zone) 1139{ 1140 uma_keg_t keg; 1141 int pages; 1142 1143 keg = zone->uz_keg; 1144 1145 KASSERT(keg != NULL, ("Keg is null in zone_large_init")); 1146 KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0, 1147 ("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone")); 1148 1149 pages = keg->uk_size / UMA_SLAB_SIZE; 1150 1151 /* Account for remainder */ 1152 if ((pages * UMA_SLAB_SIZE) < keg->uk_size) 1153 pages++; 1154 1155 keg->uk_ppera = pages; 1156 keg->uk_ipers = 1; 1157 1158 keg->uk_flags |= UMA_ZONE_OFFPAGE; 1159 if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0) 1160 keg->uk_flags |= UMA_ZONE_HASH; 1161 1162 keg->uk_rsize = keg->uk_size; 1163} 1164 1165/* 1166 * Keg header ctor. This initializes all fields, locks, etc. And inserts 1167 * the keg onto the global keg list. 
1168 * 1169 * Arguments/Returns follow uma_ctor specifications 1170 * udata Actually uma_kctor_args 1171 */ 1172static int 1173keg_ctor(void *mem, int size, void *udata, int flags) 1174{ 1175 struct uma_kctor_args *arg = udata; 1176 uma_keg_t keg = mem; 1177 uma_zone_t zone; 1178 1179 bzero(keg, size); 1180 keg->uk_size = arg->size; 1181 keg->uk_init = arg->uminit; 1182 keg->uk_fini = arg->fini; 1183 keg->uk_align = arg->align; 1184 keg->uk_free = 0; 1185 keg->uk_pages = 0; 1186 keg->uk_flags = arg->flags; 1187 keg->uk_allocf = page_alloc; 1188 keg->uk_freef = page_free; 1189 keg->uk_recurse = 0; 1190 keg->uk_slabzone = NULL; 1191 1192 /* 1193 * The master zone is passed to us at keg-creation time. 1194 */ 1195 zone = arg->zone; 1196 zone->uz_keg = keg; 1197 1198 if (arg->flags & UMA_ZONE_VM) 1199 keg->uk_flags |= UMA_ZFLAG_CACHEONLY; 1200 1201 if (arg->flags & UMA_ZONE_ZINIT) 1202 keg->uk_init = zero_init; 1203 1204 /* 1205 * The +UMA_FRITM_SZ added to uk_size is to account for the 1206 * linkage that is added to the size in zone_small_init(). If 1207 * we don't account for this here then we may end up in 1208 * zone_small_init() with a calculated 'ipers' of 0. 1209 */ 1210 if (keg->uk_flags & UMA_ZONE_REFCNT) { 1211 if ((keg->uk_size+UMA_FRITMREF_SZ) > 1212 (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt))) 1213 zone_large_init(zone); 1214 else 1215 zone_small_init(zone); 1216 } else { 1217 if ((keg->uk_size+UMA_FRITM_SZ) > 1218 (UMA_SLAB_SIZE - sizeof(struct uma_slab))) 1219 zone_large_init(zone); 1220 else 1221 zone_small_init(zone); 1222 } 1223 1224 if (keg->uk_flags & UMA_ZONE_OFFPAGE) { 1225 if (keg->uk_flags & UMA_ZONE_REFCNT) 1226 keg->uk_slabzone = slabrefzone; 1227 else 1228 keg->uk_slabzone = slabzone; 1229 } 1230 1231 /* 1232 * If we haven't booted yet we need allocations to go through the 1233 * startup cache until the vm is ready. 1234 */ 1235 if (keg->uk_ppera == 1) { 1236#ifdef UMA_MD_SMALL_ALLOC 1237 keg->uk_allocf = uma_small_alloc; 1238 keg->uk_freef = uma_small_free; 1239#endif 1240 if (booted == 0) 1241 keg->uk_allocf = startup_alloc; 1242 } 1243 1244 /* 1245 * Initialize keg's lock (shared among zones) through 1246 * Master zone 1247 */ 1248 zone->uz_lock = &keg->uk_lock; 1249 if (arg->flags & UMA_ZONE_MTXCLASS) 1250 ZONE_LOCK_INIT(zone, 1); 1251 else 1252 ZONE_LOCK_INIT(zone, 0); 1253 1254 /* 1255 * If we're putting the slab header in the actual page we need to 1256 * figure out where in each page it goes. This calculates a right 1257 * justified offset into the memory on an ALIGN_PTR boundary. 1258 */ 1259 if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) { 1260 u_int totsize; 1261 1262 /* Size of the slab struct and free list */ 1263 if (keg->uk_flags & UMA_ZONE_REFCNT) 1264 totsize = sizeof(struct uma_slab_refcnt) + 1265 keg->uk_ipers * UMA_FRITMREF_SZ; 1266 else 1267 totsize = sizeof(struct uma_slab) + 1268 keg->uk_ipers * UMA_FRITM_SZ; 1269 1270 if (totsize & UMA_ALIGN_PTR) 1271 totsize = (totsize & ~UMA_ALIGN_PTR) + 1272 (UMA_ALIGN_PTR + 1); 1273 keg->uk_pgoff = UMA_SLAB_SIZE - totsize; 1274 1275 if (keg->uk_flags & UMA_ZONE_REFCNT) 1276 totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt) 1277 + keg->uk_ipers * UMA_FRITMREF_SZ; 1278 else 1279 totsize = keg->uk_pgoff + sizeof(struct uma_slab) 1280 + keg->uk_ipers * UMA_FRITM_SZ; 1281 1282 /* 1283 * The only way the following is possible is if with our 1284 * UMA_ALIGN_PTR adjustments we are now bigger than 1285 * UMA_SLAB_SIZE. 
I haven't checked whether this is 1286 * mathematically possible for all cases, so we make 1287 * sure here anyway. 1288 */ 1289 if (totsize > UMA_SLAB_SIZE) { 1290 printf("zone %s ipers %d rsize %d size %d\n", 1291 zone->uz_name, keg->uk_ipers, keg->uk_rsize, 1292 keg->uk_size); 1293 panic("UMA slab won't fit.\n"); 1294 } 1295 } 1296 1297 if (keg->uk_flags & UMA_ZONE_HASH) 1298 hash_alloc(&keg->uk_hash); 1299 1300#ifdef UMA_DEBUG 1301 printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n", 1302 zone->uz_name, zone, 1303 keg->uk_size, keg->uk_ipers, 1304 keg->uk_ppera, keg->uk_pgoff); 1305#endif 1306 1307 LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link); 1308 1309 mtx_lock(&uma_mtx); 1310 LIST_INSERT_HEAD(&uma_kegs, keg, uk_link); 1311 mtx_unlock(&uma_mtx); 1312 return (0); 1313} 1314 1315/* 1316 * Zone header ctor. This initializes all fields, locks, etc. 1317 * 1318 * Arguments/Returns follow uma_ctor specifications 1319 * udata Actually uma_zctor_args 1320 */ 1321 1322static int 1323zone_ctor(void *mem, int size, void *udata, int flags) 1324{ 1325 struct uma_zctor_args *arg = udata; 1326 uma_zone_t zone = mem; 1327 uma_zone_t z; 1328 uma_keg_t keg; 1329 1330 bzero(zone, size); 1331 zone->uz_name = arg->name; 1332 zone->uz_ctor = arg->ctor; 1333 zone->uz_dtor = arg->dtor; 1334 zone->uz_init = NULL; 1335 zone->uz_fini = NULL; 1336 zone->uz_allocs = 0; 1337 zone->uz_frees = 0; 1338 zone->uz_fills = zone->uz_count = 0; 1339 1340 if (arg->flags & UMA_ZONE_SECONDARY) { 1341 KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg")); 1342 keg = arg->keg; 1343 zone->uz_keg = keg; 1344 zone->uz_init = arg->uminit; 1345 zone->uz_fini = arg->fini; 1346 zone->uz_lock = &keg->uk_lock; 1347 mtx_lock(&uma_mtx); 1348 ZONE_LOCK(zone); 1349 keg->uk_flags |= UMA_ZONE_SECONDARY; 1350 LIST_FOREACH(z, &keg->uk_zones, uz_link) { 1351 if (LIST_NEXT(z, uz_link) == NULL) { 1352 LIST_INSERT_AFTER(z, zone, uz_link); 1353 break; 1354 } 1355 } 1356 ZONE_UNLOCK(zone); 1357 mtx_unlock(&uma_mtx); 1358 } else if (arg->keg == NULL) { 1359 if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini, 1360 arg->align, arg->flags) == NULL) 1361 return (ENOMEM); 1362 } else { 1363 struct uma_kctor_args karg; 1364 int error; 1365 1366 /* We should only be here from uma_startup() */ 1367 karg.size = arg->size; 1368 karg.uminit = arg->uminit; 1369 karg.fini = arg->fini; 1370 karg.align = arg->align; 1371 karg.flags = arg->flags; 1372 karg.zone = zone; 1373 error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg, 1374 flags); 1375 if (error) 1376 return (error); 1377 } 1378 keg = zone->uz_keg; 1379 zone->uz_lock = &keg->uk_lock; 1380 1381 /* 1382 * Some internal zones don't have room allocated for the per cpu 1383 * caches. If we're internal, bail out here. 1384 */ 1385 if (keg->uk_flags & UMA_ZFLAG_INTERNAL) { 1386 KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0, 1387 ("Secondary zone requested UMA_ZFLAG_INTERNAL")); 1388 return (0); 1389 } 1390 1391 if (keg->uk_flags & UMA_ZONE_MAXBUCKET) 1392 zone->uz_count = BUCKET_MAX; 1393 else if (keg->uk_ipers <= BUCKET_MAX) 1394 zone->uz_count = keg->uk_ipers; 1395 else 1396 zone->uz_count = BUCKET_MAX; 1397 return (0); 1398} 1399 1400/* 1401 * Keg header dtor. This frees all data, destroys locks, frees the hash 1402 * table and removes the keg from the global list. 
1403 * 1404 * Arguments/Returns follow uma_dtor specifications 1405 * udata unused 1406 */ 1407static void 1408keg_dtor(void *arg, int size, void *udata) 1409{ 1410 uma_keg_t keg; 1411 1412 keg = (uma_keg_t)arg; 1413 mtx_lock(&keg->uk_lock); 1414 if (keg->uk_free != 0) { 1415 printf("Freed UMA keg was not empty (%d items). " 1416 " Lost %d pages of memory.\n", 1417 keg->uk_free, keg->uk_pages); 1418 } 1419 mtx_unlock(&keg->uk_lock); 1420 1421 if (keg->uk_flags & UMA_ZONE_HASH) 1422 hash_free(&keg->uk_hash); 1423 1424 mtx_destroy(&keg->uk_lock); 1425} 1426 1427/* 1428 * Zone header dtor. 1429 * 1430 * Arguments/Returns follow uma_dtor specifications 1431 * udata unused 1432 */ 1433static void 1434zone_dtor(void *arg, int size, void *udata) 1435{ 1436 uma_zone_t zone; 1437 uma_keg_t keg; 1438 1439 zone = (uma_zone_t)arg; 1440 keg = zone->uz_keg; 1441 1442 if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) 1443 cache_drain(zone); 1444 1445 mtx_lock(&uma_mtx); 1446 zone_drain(zone); 1447 if (keg->uk_flags & UMA_ZONE_SECONDARY) { 1448 LIST_REMOVE(zone, uz_link); 1449 /* 1450 * XXX there are some races here where 1451 * the zone can be drained but zone lock 1452 * released and then refilled before we 1453 * remove it... we dont care for now 1454 */ 1455 ZONE_LOCK(zone); 1456 if (LIST_EMPTY(&keg->uk_zones)) 1457 keg->uk_flags &= ~UMA_ZONE_SECONDARY; 1458 ZONE_UNLOCK(zone); 1459 mtx_unlock(&uma_mtx); 1460 } else { 1461 LIST_REMOVE(keg, uk_link); 1462 LIST_REMOVE(zone, uz_link); 1463 mtx_unlock(&uma_mtx); 1464 uma_zfree_internal(kegs, keg, NULL, SKIP_NONE); 1465 } 1466 zone->uz_keg = NULL; 1467} 1468 1469/* 1470 * Traverses every zone in the system and calls a callback 1471 * 1472 * Arguments: 1473 * zfunc A pointer to a function which accepts a zone 1474 * as an argument. 1475 * 1476 * Returns: 1477 * Nothing 1478 */ 1479static void 1480zone_foreach(void (*zfunc)(uma_zone_t)) 1481{ 1482 uma_keg_t keg; 1483 uma_zone_t zone; 1484 1485 mtx_lock(&uma_mtx); 1486 LIST_FOREACH(keg, &uma_kegs, uk_link) { 1487 LIST_FOREACH(zone, &keg->uk_zones, uz_link) 1488 zfunc(zone); 1489 } 1490 mtx_unlock(&uma_mtx); 1491} 1492 1493/* Public functions */ 1494/* See uma.h */ 1495void 1496uma_startup(void *bootmem) 1497{ 1498 struct uma_zctor_args args; 1499 uma_slab_t slab; 1500 u_int slabsize; 1501 u_int objsize, totsize, wsize; 1502 int i; 1503 1504#ifdef UMA_DEBUG 1505 printf("Creating uma keg headers zone and keg.\n"); 1506#endif 1507 /* 1508 * The general UMA lock is a recursion-allowed lock because 1509 * there is a code path where, while we're still configured 1510 * to use startup_alloc() for backend page allocations, we 1511 * may end up in uma_reclaim() which calls zone_foreach(zone_drain), 1512 * which grabs uma_mtx, only to later call into startup_alloc() 1513 * because while freeing we needed to allocate a bucket. Since 1514 * startup_alloc() also takes uma_mtx, we need to be able to 1515 * recurse on it. 1516 */ 1517 mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE); 1518 1519 /* 1520 * Figure out the maximum number of items-per-slab we'll have if 1521 * we're using the OFFPAGE slab header to track free items, given 1522 * all possible object sizes and the maximum desired wastage 1523 * (UMA_MAX_WASTE). 1524 * 1525 * We iterate until we find an object size for 1526 * which the calculated wastage in zone_small_init() will be 1527 * enough to warrant OFFPAGE. 
Since wastedspace versus objsize 1528 * is an overall increasing see-saw function, we find the smallest 1529 * objsize such that the wastage is always acceptable for objects 1530 * with that objsize or smaller. Since a smaller objsize always 1531 * generates a larger possible uma_max_ipers, we use this computed 1532 * objsize to calculate the largest ipers possible. Since the 1533 * ipers calculated for OFFPAGE slab headers is always larger than 1534 * the ipers initially calculated in zone_small_init(), we use 1535 * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to 1536 * obtain the maximum ipers possible for offpage slab headers. 1537 * 1538 * It should be noted that ipers versus objsize is an inversly 1539 * proportional function which drops off rather quickly so as 1540 * long as our UMA_MAX_WASTE is such that the objsize we calculate 1541 * falls into the portion of the inverse relation AFTER the steep 1542 * falloff, then uma_max_ipers shouldn't be too high (~10 on i386). 1543 * 1544 * Note that we have 8-bits (1 byte) to use as a freelist index 1545 * inside the actual slab header itself and this is enough to 1546 * accomodate us. In the worst case, a UMA_SMALLEST_UNIT sized 1547 * object with offpage slab header would have ipers = 1548 * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is 1549 * 1 greater than what our byte-integer freelist index can 1550 * accomodate, but we know that this situation never occurs as 1551 * for UMA_SMALLEST_UNIT-sized objects, we will never calculate 1552 * that we need to go to offpage slab headers. Or, if we do, 1553 * then we trap that condition below and panic in the INVARIANTS case. 1554 */ 1555 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE; 1556 totsize = wsize; 1557 objsize = UMA_SMALLEST_UNIT; 1558 while (totsize >= wsize) { 1559 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / 1560 (objsize + UMA_FRITM_SZ); 1561 totsize *= (UMA_FRITM_SZ + objsize); 1562 objsize++; 1563 } 1564 if (objsize > UMA_SMALLEST_UNIT) 1565 objsize--; 1566 uma_max_ipers = UMA_SLAB_SIZE / objsize; 1567 1568 wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE; 1569 totsize = wsize; 1570 objsize = UMA_SMALLEST_UNIT; 1571 while (totsize >= wsize) { 1572 totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) / 1573 (objsize + UMA_FRITMREF_SZ); 1574 totsize *= (UMA_FRITMREF_SZ + objsize); 1575 objsize++; 1576 } 1577 if (objsize > UMA_SMALLEST_UNIT) 1578 objsize--; 1579 uma_max_ipers_ref = UMA_SLAB_SIZE / objsize; 1580 1581 KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255), 1582 ("uma_startup: calculated uma_max_ipers values too large!")); 1583 1584#ifdef UMA_DEBUG 1585 printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers); 1586 printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n", 1587 uma_max_ipers_ref); 1588#endif 1589 1590 /* "manually" create the initial zone */ 1591 args.name = "UMA Kegs"; 1592 args.size = sizeof(struct uma_keg); 1593 args.ctor = keg_ctor; 1594 args.dtor = keg_dtor; 1595 args.uminit = zero_init; 1596 args.fini = NULL; 1597 args.keg = &masterkeg; 1598 args.align = 32 - 1; 1599 args.flags = UMA_ZFLAG_INTERNAL; 1600 /* The initial zone has no Per cpu queues so it's smaller */ 1601 zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK); 1602 1603#ifdef UMA_DEBUG 1604 printf("Filling boot free list.\n"); 1605#endif 1606 for (i = 0; i < UMA_BOOT_PAGES; i++) { 1607 slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE)); 1608 slab->us_data = 
(u_int8_t *)slab; 1609 slab->us_flags = UMA_SLAB_BOOT; 1610 LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link); 1611 uma_boot_free++; 1612 } 1613 1614#ifdef UMA_DEBUG 1615 printf("Creating uma zone headers zone and keg.\n"); 1616#endif 1617 args.name = "UMA Zones"; 1618 args.size = sizeof(struct uma_zone) + 1619 (sizeof(struct uma_cache) * (mp_maxid + 1)); 1620 args.ctor = zone_ctor; 1621 args.dtor = zone_dtor; 1622 args.uminit = zero_init; 1623 args.fini = NULL; 1624 args.keg = NULL; 1625 args.align = 32 - 1; 1626 args.flags = UMA_ZFLAG_INTERNAL; 1627 /* The initial zone has no Per cpu queues so it's smaller */ 1628 zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK); 1629 1630#ifdef UMA_DEBUG 1631 printf("Initializing pcpu cache locks.\n"); 1632#endif 1633#ifdef UMA_DEBUG 1634 printf("Creating slab and hash zones.\n"); 1635#endif 1636 1637 /* 1638 * This is the max number of free list items we'll have with 1639 * offpage slabs. 1640 */ 1641 slabsize = uma_max_ipers * UMA_FRITM_SZ; 1642 slabsize += sizeof(struct uma_slab); 1643 1644 /* Now make a zone for slab headers */ 1645 slabzone = uma_zcreate("UMA Slabs", 1646 slabsize, 1647 NULL, NULL, NULL, NULL, 1648 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1649 1650 /* 1651 * We also create a zone for the bigger slabs with reference 1652 * counts in them, to accomodate UMA_ZONE_REFCNT zones. 1653 */ 1654 slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ; 1655 slabsize += sizeof(struct uma_slab_refcnt); 1656 slabrefzone = uma_zcreate("UMA RCntSlabs", 1657 slabsize, 1658 NULL, NULL, NULL, NULL, 1659 UMA_ALIGN_PTR, 1660 UMA_ZFLAG_INTERNAL); 1661 1662 hashzone = uma_zcreate("UMA Hash", 1663 sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT, 1664 NULL, NULL, NULL, NULL, 1665 UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL); 1666 1667 bucket_init(); 1668 1669#ifdef UMA_MD_SMALL_ALLOC 1670 booted = 1; 1671#endif 1672 1673#ifdef UMA_DEBUG 1674 printf("UMA startup complete.\n"); 1675#endif 1676} 1677 1678/* see uma.h */ 1679void 1680uma_startup2(void) 1681{ 1682 booted = 1; 1683 bucket_enable(); 1684#ifdef UMA_DEBUG 1685 printf("UMA startup2 complete.\n"); 1686#endif 1687} 1688 1689/* 1690 * Initialize our callout handle 1691 * 1692 */ 1693 1694static void 1695uma_startup3(void) 1696{ 1697#ifdef UMA_DEBUG 1698 printf("Starting callout.\n"); 1699#endif 1700 callout_init(&uma_callout, CALLOUT_MPSAFE); 1701 callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL); 1702#ifdef UMA_DEBUG 1703 printf("UMA startup3 complete.\n"); 1704#endif 1705} 1706 1707static uma_zone_t 1708uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini, 1709 int align, u_int16_t flags) 1710{ 1711 struct uma_kctor_args args; 1712 1713 args.size = size; 1714 args.uminit = uminit; 1715 args.fini = fini; 1716 args.align = align; 1717 args.flags = flags; 1718 args.zone = zone; 1719 return (uma_zalloc_internal(kegs, &args, M_WAITOK)); 1720} 1721 1722/* See uma.h */ 1723uma_zone_t 1724uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor, 1725 uma_init uminit, uma_fini fini, int align, u_int16_t flags) 1726 1727{ 1728 struct uma_zctor_args args; 1729 1730 /* This stuff is essential for the zone ctor */ 1731 args.name = name; 1732 args.size = size; 1733 args.ctor = ctor; 1734 args.dtor = dtor; 1735 args.uminit = uminit; 1736 args.fini = fini; 1737 args.align = align; 1738 args.flags = flags; 1739 args.keg = NULL; 1740 1741 return (uma_zalloc_internal(zones, &args, M_WAITOK)); 1742} 1743 1744/* See uma.h */ 1745uma_zone_t 1746uma_zsecond_create(char *name, uma_ctor ctor, 
uma_dtor dtor, 1747 uma_init zinit, uma_fini zfini, uma_zone_t master) 1748{ 1749 struct uma_zctor_args args; 1750 1751 args.name = name; 1752 args.size = master->uz_keg->uk_size; 1753 args.ctor = ctor; 1754 args.dtor = dtor; 1755 args.uminit = zinit; 1756 args.fini = zfini; 1757 args.align = master->uz_keg->uk_align; 1758 args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY; 1759 args.keg = master->uz_keg; 1760 1761 return (uma_zalloc_internal(zones, &args, M_WAITOK)); 1762} 1763 1764/* See uma.h */ 1765void 1766uma_zdestroy(uma_zone_t zone) 1767{ 1768 uma_zfree_internal(zones, zone, NULL, SKIP_NONE); 1769} 1770 1771/* See uma.h */ 1772void * 1773uma_zalloc_arg(uma_zone_t zone, void *udata, int flags) 1774{ 1775 void *item; 1776 uma_cache_t cache; 1777 uma_bucket_t bucket; 1778 int cpu; 1779 int badness; 1780 1781 /* This is the fast path allocation */ 1782#ifdef UMA_DEBUG_ALLOC_1 1783 printf("Allocating one item from %s(%p)\n", zone->uz_name, zone); 1784#endif 1785 CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread, 1786 zone->uz_name, flags); 1787 1788 if (!(flags & M_NOWAIT)) { 1789 KASSERT(curthread->td_intr_nesting_level == 0, 1790 ("malloc(M_WAITOK) in interrupt context")); 1791 if (nosleepwithlocks) { 1792#ifdef WITNESS 1793 badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, 1794 NULL, 1795 "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT", 1796 zone->uz_name); 1797#else 1798 badness = 1; 1799#endif 1800 } else { 1801 badness = 0; 1802#ifdef WITNESS 1803 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 1804 "malloc(M_WAITOK) of \"%s\"", zone->uz_name); 1805#endif 1806 } 1807 if (badness) { 1808 flags &= ~M_WAITOK; 1809 flags |= M_NOWAIT; 1810 } 1811 } 1812 1813 /* 1814 * If possible, allocate from the per-CPU cache. There are two 1815 * requirements for safe access to the per-CPU cache: (1) the thread 1816 * accessing the cache must not be preempted or yield during access, 1817 * and (2) the thread must not migrate CPUs without switching which 1818 * cache it accesses. We rely on a critical section to prevent 1819 * preemption and migration. We release the critical section in 1820 * order to acquire the zone mutex if we are unable to allocate from 1821 * the current cache; when we re-acquire the critical section, we 1822 * must detect and handle migration if it has occurred. 1823 */ 1824zalloc_restart: 1825 critical_enter(); 1826 cpu = curcpu; 1827 cache = &zone->uz_cpu[cpu]; 1828 1829zalloc_start: 1830 bucket = cache->uc_allocbucket; 1831 1832 if (bucket) { 1833 if (bucket->ub_cnt > 0) { 1834 bucket->ub_cnt--; 1835 item = bucket->ub_bucket[bucket->ub_cnt]; 1836#ifdef INVARIANTS 1837 bucket->ub_bucket[bucket->ub_cnt] = NULL; 1838#endif 1839 KASSERT(item != NULL, 1840 ("uma_zalloc: Bucket pointer mangled.")); 1841 cache->uc_allocs++; 1842 critical_exit(); 1843#ifdef INVARIANTS 1844 ZONE_LOCK(zone); 1845 uma_dbg_alloc(zone, NULL, item); 1846 ZONE_UNLOCK(zone); 1847#endif 1848 if (zone->uz_ctor != NULL) { 1849 if (zone->uz_ctor(item, zone->uz_keg->uk_size, 1850 udata, flags) != 0) { 1851 uma_zfree_internal(zone, item, udata, 1852 SKIP_DTOR); 1853 return (NULL); 1854 } 1855 } 1856 if (flags & M_ZERO) 1857 bzero(item, zone->uz_keg->uk_size); 1858 return (item); 1859 } else if (cache->uc_freebucket) { 1860 /* 1861 * We have run out of items in our allocbucket. 1862 * See if we can switch with our free bucket. 
1863 */ 1864 if (cache->uc_freebucket->ub_cnt > 0) { 1865#ifdef UMA_DEBUG_ALLOC 1866 printf("uma_zalloc: Swapping empty with" 1867 " alloc.\n"); 1868#endif 1869 bucket = cache->uc_freebucket; 1870 cache->uc_freebucket = cache->uc_allocbucket; 1871 cache->uc_allocbucket = bucket; 1872 1873 goto zalloc_start; 1874 } 1875 } 1876 } 1877 /* 1878 * Attempt to retrieve the item from the per-CPU cache has failed, so 1879 * we must go back to the zone. This requires the zone lock, so we 1880 * must drop the critical section, then re-acquire it when we go back 1881 * to the cache. Since the critical section is released, we may be 1882 * preempted or migrate. As such, make sure not to maintain any 1883 * thread-local state specific to the cache from prior to releasing 1884 * the critical section. 1885 */ 1886 critical_exit(); 1887 ZONE_LOCK(zone); 1888 critical_enter(); 1889 cpu = curcpu; 1890 cache = &zone->uz_cpu[cpu]; 1891 bucket = cache->uc_allocbucket; 1892 if (bucket != NULL) { 1893 if (bucket->ub_cnt > 0) { 1894 ZONE_UNLOCK(zone); 1895 goto zalloc_start; 1896 } 1897 bucket = cache->uc_freebucket; 1898 if (bucket != NULL && bucket->ub_cnt > 0) { 1899 ZONE_UNLOCK(zone); 1900 goto zalloc_start; 1901 } 1902 } 1903 1904 /* Since we have locked the zone we may as well send back our stats */ 1905 zone->uz_allocs += cache->uc_allocs; 1906 cache->uc_allocs = 0; 1907 zone->uz_frees += cache->uc_frees; 1908 cache->uc_frees = 0; 1909 1910 /* Our old one is now a free bucket */ 1911 if (cache->uc_allocbucket) { 1912 KASSERT(cache->uc_allocbucket->ub_cnt == 0, 1913 ("uma_zalloc_arg: Freeing a non free bucket.")); 1914 LIST_INSERT_HEAD(&zone->uz_free_bucket, 1915 cache->uc_allocbucket, ub_link); 1916 cache->uc_allocbucket = NULL; 1917 } 1918 1919 /* Check the free list for a new alloc bucket */ 1920 if ((bucket = LIST_FIRST(&zone->uz_full_bucket)) != NULL) { 1921 KASSERT(bucket->ub_cnt != 0, 1922 ("uma_zalloc_arg: Returning an empty bucket.")); 1923 1924 LIST_REMOVE(bucket, ub_link); 1925 cache->uc_allocbucket = bucket; 1926 ZONE_UNLOCK(zone); 1927 goto zalloc_start; 1928 } 1929 /* We are no longer associated with this CPU. */ 1930 critical_exit(); 1931 1932 /* Bump up our uz_count so we get here less */ 1933 if (zone->uz_count < BUCKET_MAX) 1934 zone->uz_count++; 1935 1936 /* 1937 * Now lets just fill a bucket and put it on the free list. If that 1938 * works we'll restart the allocation from the begining. 1939 */ 1940 if (uma_zalloc_bucket(zone, flags)) { 1941 ZONE_UNLOCK(zone); 1942 goto zalloc_restart; 1943 } 1944 ZONE_UNLOCK(zone); 1945 /* 1946 * We may not be able to get a bucket so return an actual item. 1947 */ 1948#ifdef UMA_DEBUG 1949 printf("uma_zalloc_arg: Bucketzone returned NULL\n"); 1950#endif 1951 1952 return (uma_zalloc_internal(zone, udata, flags)); 1953} 1954 1955static uma_slab_t 1956uma_zone_slab(uma_zone_t zone, int flags) 1957{ 1958 uma_slab_t slab; 1959 uma_keg_t keg; 1960 1961 keg = zone->uz_keg; 1962 1963 /* 1964 * This is to prevent us from recursively trying to allocate 1965 * buckets. The problem is that if an allocation forces us to 1966 * grab a new bucket we will call page_alloc, which will go off 1967 * and cause the vm to allocate vm_map_entries. If we need new 1968 * buckets there too we will recurse in kmem_alloc and bad 1969 * things happen. 
static uma_slab_t
uma_zone_slab(uma_zone_t zone, int flags)
{
        uma_slab_t slab;
        uma_keg_t keg;

        keg = zone->uz_keg;

        /*
         * This is to prevent us from recursively trying to allocate
         * buckets.  The problem is that if an allocation forces us to
         * grab a new bucket we will call page_alloc, which will go off
         * and cause the vm to allocate vm_map_entries.  If we need new
         * buckets there too we will recurse in kmem_alloc and bad
         * things happen.  So instead we return a NULL bucket, and make
         * the code that allocates buckets smart enough to deal with it.
         *
         * XXX: While we want this protection for the bucket zones so that
         * recursion from the VM is handled (and the calling code that
         * allocates buckets knows how to deal with it), we do not want
         * to prevent allocation from the slab header zones (slabzone
         * and slabrefzone) if uk_recurse is not zero for them.  The
         * reason is that it could lead to NULL being returned for
         * slab header allocations even in the M_WAITOK case, and the
         * caller can't handle that.
         */
        if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
                if ((zone != slabzone) && (zone != slabrefzone))
                        return (NULL);

        slab = NULL;

        for (;;) {
                /*
                 * Find a slab with some space.  Prefer slabs that are
                 * partially used over those that are totally full.  This
                 * helps to reduce fragmentation.
                 */
                if (keg->uk_free != 0) {
                        if (!LIST_EMPTY(&keg->uk_part_slab)) {
                                slab = LIST_FIRST(&keg->uk_part_slab);
                        } else {
                                slab = LIST_FIRST(&keg->uk_free_slab);
                                LIST_REMOVE(slab, us_link);
                                LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
                                    us_link);
                        }
                        return (slab);
                }

                /*
                 * M_NOVM means don't ask at all!
                 */
                if (flags & M_NOVM)
                        break;

                if (keg->uk_maxpages &&
                    keg->uk_pages >= keg->uk_maxpages) {
                        keg->uk_flags |= UMA_ZFLAG_FULL;

                        if (flags & M_NOWAIT)
                                break;
                        else
                                msleep(keg, &keg->uk_lock, PVM,
                                    "zonelimit", 0);
                        continue;
                }
                keg->uk_recurse++;
                slab = slab_zalloc(zone, flags);
                keg->uk_recurse--;

                /*
                 * If we got a slab here it's safe to mark it partially used
                 * and return.  We assume that the caller is going to remove
                 * at least one item.
                 */
                if (slab) {
                        LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
                        return (slab);
                }
                /*
                 * We might not have been able to get a slab but another cpu
                 * could have while we were unlocked.  Check again before we
                 * fail.
                 */
                if (flags & M_NOWAIT)
                        flags |= M_NOVM;
        }
        return (slab);
}

static void *
uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
{
        uma_keg_t keg;
        uma_slabrefcnt_t slabref;
        void *item;
        u_int8_t freei;

        keg = zone->uz_keg;

        freei = slab->us_firstfree;
        if (keg->uk_flags & UMA_ZONE_REFCNT) {
                slabref = (uma_slabrefcnt_t)slab;
                slab->us_firstfree = slabref->us_freelist[freei].us_item;
        } else {
                slab->us_firstfree = slab->us_freelist[freei].us_item;
        }
        item = slab->us_data + (keg->uk_rsize * freei);

        slab->us_freecount--;
        keg->uk_free--;
#ifdef INVARIANTS
        uma_dbg_alloc(zone, slab, item);
#endif
        /* Move this slab to the full list */
        if (slab->us_freecount == 0) {
                LIST_REMOVE(slab, us_link);
                LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
        }

        return (item);
}
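/*
 * Illustrative sketch (compiled out; a self-contained model, not UMA's
 * real structures): the intra-slab free list that uma_slab_alloc() pops
 * above is a singly linked list threaded through item *indices* rather
 * than pointers.  us_firstfree names the head index and each freelist
 * entry stores the index of the next free item, so one small integer per
 * item suffices.  A real implementation also tracks a free count, as the
 * slab does with us_freecount; this model omits it.
 */
#if 0
#define NITEMS  4

static u_int8_t firstfree = 0;                          /* head of free list */
static u_int8_t nextfree[NITEMS] = { 1, 2, 3, 0 };      /* index links */

static int
model_alloc_index(void)
{
        u_int8_t freei;

        freei = firstfree;              /* take the head item... */
        firstfree = nextfree[freei];    /* ...and advance the head */
        return (freei);
}

static void
model_free_index(u_int8_t freei)
{
        nextfree[freei] = firstfree;    /* push onto the list head */
        firstfree = freei;
}
#endif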
static int
uma_zalloc_bucket(uma_zone_t zone, int flags)
{
        uma_bucket_t bucket;
        uma_slab_t slab;
        int16_t saved;
        int max, origflags = flags;

        /*
         * Try this zone's free list first so we don't allocate extra
         * buckets.
         */
        if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
                KASSERT(bucket->ub_cnt == 0,
                    ("uma_zalloc_bucket: Bucket on free list is not empty."));
                LIST_REMOVE(bucket, ub_link);
        } else {
                int bflags;

                bflags = (flags & ~M_ZERO);
                if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
                        bflags |= M_NOVM;

                ZONE_UNLOCK(zone);
                bucket = bucket_alloc(zone->uz_count, bflags);
                ZONE_LOCK(zone);
        }

        if (bucket == NULL)
                return (0);

#ifdef SMP
        /*
         * This code is here to limit the number of simultaneous bucket
         * fills for any given zone to the number of per cpu caches in this
         * zone.  This is done so that we don't allocate more memory than
         * we really need.
         */
        if (zone->uz_fills >= mp_ncpus)
                goto done;

#endif
        zone->uz_fills++;

        max = MIN(bucket->ub_entries, zone->uz_count);
        /* Try to keep the buckets totally full */
        saved = bucket->ub_cnt;
        while (bucket->ub_cnt < max &&
            (slab = uma_zone_slab(zone, flags)) != NULL) {
                while (slab->us_freecount && bucket->ub_cnt < max) {
                        bucket->ub_bucket[bucket->ub_cnt++] =
                            uma_slab_alloc(zone, slab);
                }

                /* Don't block on the next fill */
                flags |= M_NOWAIT;
        }

        /*
         * We unlock here because we need to call the zone's init.
         * It should be safe to unlock because the slab dealt with
         * above is already on the appropriate list within the keg
         * and the bucket we filled is not yet on any list, so we
         * own it.
         */
        if (zone->uz_init != NULL) {
                int i;

                ZONE_UNLOCK(zone);
                for (i = saved; i < bucket->ub_cnt; i++)
                        if (zone->uz_init(bucket->ub_bucket[i],
                            zone->uz_keg->uk_size, origflags) != 0)
                                break;
                /*
                 * If we couldn't initialize the whole bucket, put the
                 * rest back onto the freelist.
                 */
                if (i != bucket->ub_cnt) {
                        int j;

                        for (j = i; j < bucket->ub_cnt; j++) {
                                uma_zfree_internal(zone, bucket->ub_bucket[j],
                                    NULL, SKIP_FINI);
#ifdef INVARIANTS
                                bucket->ub_bucket[j] = NULL;
#endif
                        }
                        bucket->ub_cnt = i;
                }
                ZONE_LOCK(zone);
        }

        zone->uz_fills--;
        if (bucket->ub_cnt != 0) {
                LIST_INSERT_HEAD(&zone->uz_full_bucket,
                    bucket, ub_link);
                return (1);
        }
#ifdef SMP
done:
#endif
        bucket_free(bucket);

        return (0);
}
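/*
 * Illustrative sketch (compiled out; generic pattern with invented
 * helpers): the rollback used above when a batch init fails part way
 * through.  Initialize items [saved, cnt); if item i fails, release the
 * uninitialized tail [i, cnt) and shrink the batch to the i items that
 * did succeed, so a caller never sees a half-initialized entry.
 */
#if 0
static int  item_init(void *item);              /* hypothetical helper */
static void item_release(void *item);           /* hypothetical helper */

static int
batch_init(void **items, int saved, int *cntp)
{
        int i, j;

        for (i = saved; i < *cntp; i++)
                if (item_init(items[i]) != 0)
                        break;
        for (j = i; j < *cntp; j++)
                item_release(items[j]);         /* discard the failed tail */
        *cntp = i;                              /* keep only what worked */
        return (i > saved);
}
#endif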
/*
 * Allocates an item for an internal zone
 *
 * Arguments
 *	zone   The zone to alloc for.
 *	udata  The data to be passed to the constructor.
 *	flags  M_WAITOK, M_NOWAIT, M_ZERO.
 *
 * Returns
 *	NULL if there is no memory and M_NOWAIT is set
 *	An item if successful
 */

static void *
uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
{
        uma_keg_t keg;
        uma_slab_t slab;
        void *item;

        item = NULL;
        keg = zone->uz_keg;

#ifdef UMA_DEBUG_ALLOC
        printf("INTERNAL: Allocating one item from %s(%p)\n",
            zone->uz_name, zone);
#endif
        ZONE_LOCK(zone);

        slab = uma_zone_slab(zone, flags);
        if (slab == NULL) {
                ZONE_UNLOCK(zone);
                return (NULL);
        }

        item = uma_slab_alloc(zone, slab);

        zone->uz_allocs++;

        ZONE_UNLOCK(zone);

        /*
         * We have to call both the zone's init (not the keg's init)
         * and the zone's ctor.  This is because the item is going from
         * a keg slab directly to the user, and the user is expecting it
         * to be both zone-init'd as well as zone-ctor'd.
         */
        if (zone->uz_init != NULL) {
                if (zone->uz_init(item, keg->uk_size, flags) != 0) {
                        uma_zfree_internal(zone, item, udata, SKIP_FINI);
                        return (NULL);
                }
        }
        if (zone->uz_ctor != NULL) {
                if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) {
                        uma_zfree_internal(zone, item, udata, SKIP_DTOR);
                        return (NULL);
                }
        }
        if (flags & M_ZERO)
                bzero(item, keg->uk_size);

        return (item);
}
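/*
 * Illustrative sketch (compiled out; invented callbacks): the
 * init-then-ctor ordering the comment above relies on.  An init runs
 * once per item lifetime in the cache (undone by a fini); a ctor runs on
 * every hand-out to a consumer (undone by a dtor).  An item that bypasses
 * the bucket cache must therefore receive both before the caller sees it,
 * and a ctor failure must undo the init that already ran.
 */
#if 0
static int  example_init(void *mem, int size, int flags);
static void example_fini(void *mem, int size);
static int  example_ctor(void *mem, int size, void *arg, int flags);

static void *
hand_out_item(void *item, int size, void *udata, int flags)
{
        if (example_init(item, size, flags) != 0)       /* once per lifetime */
                return (NULL);
        if (example_ctor(item, size, udata, flags) != 0) {
                example_fini(item, size);               /* undo the init */
                return (NULL);
        }
        return (item);
}
#endif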
/* See uma.h */
void
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
        uma_keg_t keg;
        uma_cache_t cache;
        uma_bucket_t bucket;
        int bflags;
        int cpu;

        keg = zone->uz_keg;

#ifdef UMA_DEBUG_ALLOC_1
        printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
#endif
        CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
            zone->uz_name);

        if (zone->uz_dtor)
                zone->uz_dtor(item, keg->uk_size, udata);
#ifdef INVARIANTS
        ZONE_LOCK(zone);
        if (keg->uk_flags & UMA_ZONE_MALLOC)
                uma_dbg_free(zone, udata, item);
        else
                uma_dbg_free(zone, NULL, item);
        ZONE_UNLOCK(zone);
#endif
        /*
         * The race here is acceptable.  If we miss it we'll just have to
         * wait a little longer for the limits to be reset.
         */
        if (keg->uk_flags & UMA_ZFLAG_FULL)
                goto zfree_internal;

        /*
         * If possible, free to the per-CPU cache.  There are two
         * requirements for safe access to the per-CPU cache: (1) the thread
         * accessing the cache must not be preempted or yield during access,
         * and (2) the thread must not migrate CPUs without switching which
         * cache it accesses.  We rely on a critical section to prevent
         * preemption and migration.  We release the critical section in
         * order to acquire the zone mutex if we are unable to free to the
         * current cache; when we re-acquire the critical section, we must
         * detect and handle migration if it has occurred.
         */
zfree_restart:
        critical_enter();
        cpu = curcpu;
        cache = &zone->uz_cpu[cpu];

zfree_start:
        bucket = cache->uc_freebucket;

        if (bucket) {
                /*
                 * Do we have room in our bucket?  It is OK for this check
                 * to be slightly out of sync.
                 */
                if (bucket->ub_cnt < bucket->ub_entries) {
                        KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
                            ("uma_zfree: Freeing to non free bucket index."));
                        bucket->ub_bucket[bucket->ub_cnt] = item;
                        bucket->ub_cnt++;
                        cache->uc_frees++;
                        critical_exit();
                        return;
                } else if (cache->uc_allocbucket) {
#ifdef UMA_DEBUG_ALLOC
                        printf("uma_zfree: Swapping buckets.\n");
#endif
                        /*
                         * We have run out of space in our freebucket.
                         * See if we can switch with our alloc bucket.
                         */
                        if (cache->uc_allocbucket->ub_cnt <
                            cache->uc_freebucket->ub_cnt) {
                                bucket = cache->uc_freebucket;
                                cache->uc_freebucket = cache->uc_allocbucket;
                                cache->uc_allocbucket = bucket;
                                goto zfree_start;
                        }
                }
        }
        /*
         * We can get here for two reasons:
         *
         * 1) The buckets are NULL
         * 2) The alloc and free buckets are both somewhat full.
         *
         * We must go back to the zone, which requires acquiring the zone
         * lock, which in turn means we must release and re-acquire the
         * critical section.  Since the critical section is released, we
         * may be preempted or migrate.  As such, make sure not to maintain
         * any thread-local state specific to the cache from prior to
         * releasing the critical section.
         */
        critical_exit();
        ZONE_LOCK(zone);
        critical_enter();
        cpu = curcpu;
        cache = &zone->uz_cpu[cpu];
        if (cache->uc_freebucket != NULL) {
                if (cache->uc_freebucket->ub_cnt <
                    cache->uc_freebucket->ub_entries) {
                        ZONE_UNLOCK(zone);
                        goto zfree_start;
                }
                if (cache->uc_allocbucket != NULL &&
                    (cache->uc_allocbucket->ub_cnt <
                     cache->uc_freebucket->ub_cnt)) {
                        ZONE_UNLOCK(zone);
                        goto zfree_start;
                }
        }

        bucket = cache->uc_freebucket;
        cache->uc_freebucket = NULL;

        /* Can we throw this on the zone full list? */
        if (bucket != NULL) {
#ifdef UMA_DEBUG_ALLOC
                printf("uma_zfree: Putting old bucket on the free list.\n");
#endif
                /* ub_cnt is pointing to the last free item */
                KASSERT(bucket->ub_cnt != 0,
                    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
                LIST_INSERT_HEAD(&zone->uz_full_bucket,
                    bucket, ub_link);
        }
        if ((bucket = LIST_FIRST(&zone->uz_free_bucket)) != NULL) {
                LIST_REMOVE(bucket, ub_link);
                ZONE_UNLOCK(zone);
                cache->uc_freebucket = bucket;
                goto zfree_start;
        }
        /* We are no longer associated with this CPU. */
        critical_exit();

        /* And the zone. */
        ZONE_UNLOCK(zone);

#ifdef UMA_DEBUG_ALLOC
        printf("uma_zfree: Allocating new free bucket.\n");
#endif
        bflags = M_NOWAIT;

        if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
                bflags |= M_NOVM;
        bucket = bucket_alloc(zone->uz_count, bflags);
        if (bucket) {
                ZONE_LOCK(zone);
                LIST_INSERT_HEAD(&zone->uz_free_bucket,
                    bucket, ub_link);
                ZONE_UNLOCK(zone);
                goto zfree_restart;
        }

        /*
         * If nothing else caught this, we'll just do an internal free.
         */
zfree_internal:
        uma_zfree_internal(zone, item, udata, SKIP_DTOR);

        return;
}
/*
 * Frees an item to an INTERNAL zone or allocates a free bucket
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 */
static void
uma_zfree_internal(uma_zone_t zone, void *item, void *udata,
    enum zfreeskip skip)
{
        uma_slab_t slab;
        uma_slabrefcnt_t slabref;
        uma_keg_t keg;
        u_int8_t *mem;
        u_int8_t freei;

        keg = zone->uz_keg;

        if (skip < SKIP_DTOR && zone->uz_dtor)
                zone->uz_dtor(item, keg->uk_size, udata);
        if (skip < SKIP_FINI && zone->uz_fini)
                zone->uz_fini(item, keg->uk_size);

        ZONE_LOCK(zone);

        if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
                mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
                if (keg->uk_flags & UMA_ZONE_HASH)
                        slab = hash_sfind(&keg->uk_hash, mem);
                else {
                        mem += keg->uk_pgoff;
                        slab = (uma_slab_t)mem;
                }
        } else {
                slab = (uma_slab_t)udata;
        }

        /* Do we need to remove from any lists? */
        if (slab->us_freecount+1 == keg->uk_ipers) {
                LIST_REMOVE(slab, us_link);
                LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
        } else if (slab->us_freecount == 0) {
                LIST_REMOVE(slab, us_link);
                LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
        }

        /* Slab management stuff */
        freei = ((unsigned long)item - (unsigned long)slab->us_data)
            / keg->uk_rsize;

#ifdef INVARIANTS
        if (!skip)
                uma_dbg_free(zone, slab, item);
#endif

        if (keg->uk_flags & UMA_ZONE_REFCNT) {
                slabref = (uma_slabrefcnt_t)slab;
                slabref->us_freelist[freei].us_item = slab->us_firstfree;
        } else {
                slab->us_freelist[freei].us_item = slab->us_firstfree;
        }
        slab->us_firstfree = freei;
        slab->us_freecount++;

        /* Zone statistics */
        keg->uk_free++;
        zone->uz_frees++;

        if (keg->uk_flags & UMA_ZFLAG_FULL) {
                if (keg->uk_pages < keg->uk_maxpages)
                        keg->uk_flags &= ~UMA_ZFLAG_FULL;

                /* We can handle one more allocation */
                wakeup_one(keg);
        }

        ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_max(uma_zone_t zone, int nitems)
{
        uma_keg_t keg;

        keg = zone->uz_keg;
        ZONE_LOCK(zone);
        if (keg->uk_ppera > 1)
                keg->uk_maxpages = nitems * keg->uk_ppera;
        else
                keg->uk_maxpages = nitems / keg->uk_ipers;

        if (keg->uk_maxpages * keg->uk_ipers < nitems)
                keg->uk_maxpages++;

        ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
        ZONE_LOCK(zone);
        KASSERT(zone->uz_keg->uk_pages == 0,
            ("uma_zone_set_init on non-empty keg"));
        zone->uz_keg->uk_init = uminit;
        ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
        ZONE_LOCK(zone);
        KASSERT(zone->uz_keg->uk_pages == 0,
            ("uma_zone_set_fini on non-empty keg"));
        zone->uz_keg->uk_fini = fini;
        ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
{
        ZONE_LOCK(zone);
        KASSERT(zone->uz_keg->uk_pages == 0,
            ("uma_zone_set_zinit on non-empty keg"));
        zone->uz_init = zinit;
        ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
{
        ZONE_LOCK(zone);
        KASSERT(zone->uz_keg->uk_pages == 0,
            ("uma_zone_set_zfini on non-empty keg"));
        zone->uz_fini = zfini;
        ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_freef is not actually used with the zone locked */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
        ZONE_LOCK(zone);
        zone->uz_keg->uk_freef = freef;
        ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_allocf is not actually used with the zone locked */
void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
        ZONE_LOCK(zone);
        zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
        zone->uz_keg->uk_allocf = allocf;
        ZONE_UNLOCK(zone);
}
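/*
 * Illustrative sketch (compiled out; the callback signature is an
 * assumption inferred from how page_alloc() is invoked elsewhere in this
 * file, and take_from_reserve() is a hypothetical pool): a private
 * backend that feeds a zone from a reserve instead of the VM, registered
 * with uma_zone_set_allocf() above.
 */
#if 0
static void *take_from_reserve(int bytes);      /* hypothetical pool */

static void *
reserve_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
        void *p;

        p = take_from_reserve(bytes);
        if (p != NULL)
                *pflag = 0;     /* pages did not come from a UMA_SLAB_* source */
        return (p);
}

static void
reserve_zone_init(uma_zone_t zone)
{
        uma_zone_set_allocf(zone, reserve_alloc);
}
#endif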
        if (kva == 0)
                return (0);
        if (obj == NULL) {
                obj = vm_object_allocate(OBJT_DEFAULT,
                    pages);
        } else {
                VM_OBJECT_LOCK_INIT(obj, "uma object");
                _vm_object_allocate(OBJT_DEFAULT,
                    pages, obj);
        }
        ZONE_LOCK(zone);
        keg->uk_kva = kva;
        keg->uk_obj = obj;
        keg->uk_maxpages = pages;
        keg->uk_allocf = obj_alloc;
        keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
        ZONE_UNLOCK(zone);
        return (1);
}

/* See uma.h */
void
uma_prealloc(uma_zone_t zone, int items)
{
        int slabs;
        uma_slab_t slab;
        uma_keg_t keg;

        keg = zone->uz_keg;
        ZONE_LOCK(zone);
        slabs = items / keg->uk_ipers;
        if (slabs * keg->uk_ipers < items)
                slabs++;
        while (slabs > 0) {
                slab = slab_zalloc(zone, M_WAITOK);
                LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
                slabs--;
        }
        ZONE_UNLOCK(zone);
}

/* See uma.h */
u_int32_t *
uma_find_refcnt(uma_zone_t zone, void *item)
{
        uma_slabrefcnt_t slabref;
        uma_keg_t keg;
        u_int32_t *refcnt;
        int idx;

        keg = zone->uz_keg;
        slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
            (~UMA_SLAB_MASK));
        KASSERT(slabref != NULL && slabref->us_keg->uk_flags & UMA_ZONE_REFCNT,
            ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
        idx = ((unsigned long)item - (unsigned long)slabref->us_data)
            / keg->uk_rsize;
        refcnt = &slabref->us_freelist[idx].us_refcnt;
        return (refcnt);
}
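/*
 * Illustrative sketch (compiled out; buf_zone and its serialization are
 * invented for the example): the intended use of a UMA_ZONE_REFCNT zone.
 * The per-item reference count lives in the slab header, so a consumer
 * looks it up with uma_find_refcnt() rather than embedding a count in
 * the item itself.
 */
#if 0
static uma_zone_t buf_zone;     /* created with UMA_ZONE_REFCNT */

static void *
buf_hold(void *item)
{
        u_int32_t *cnt;

        cnt = uma_find_refcnt(buf_zone, item);
        (*cnt)++;                       /* caller provides serialization */
        return (item);
}

static void
buf_drop(void *item)
{
        u_int32_t *cnt;

        cnt = uma_find_refcnt(buf_zone, item);
        if (--(*cnt) == 0)
                uma_zfree(buf_zone, item);
}
#endif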
/* See uma.h */
void
uma_reclaim(void)
{
#ifdef UMA_DEBUG
        printf("UMA: vm asked us to release pages!\n");
#endif
        bucket_enable();
        zone_foreach(zone_drain);
        /*
         * The slab header zones are visited early in the pass above, and
         * draining the other zones may free slab headers back into them.
         * Drain them again so that pages which became empty once the other
         * zones were drained can be freed.  We have to do the same for
         * buckets.
         */
        zone_drain(slabzone);
        zone_drain(slabrefzone);
        bucket_zone_drain();
}

void *
uma_large_malloc(int size, int wait)
{
        void *mem;
        uma_slab_t slab;
        u_int8_t flags;

        slab = uma_zalloc_internal(slabzone, NULL, wait);
        if (slab == NULL)
                return (NULL);
        mem = page_alloc(NULL, size, &flags, wait);
        if (mem) {
                vsetslab((vm_offset_t)mem, slab);
                slab->us_data = mem;
                slab->us_flags = flags | UMA_SLAB_MALLOC;
                slab->us_size = size;
        } else {
                uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE);
        }

        return (mem);
}

void
uma_large_free(uma_slab_t slab)
{
        vsetobj((vm_offset_t)slab->us_data, kmem_object);
        page_free(slab->us_data, slab->us_size, slab->us_flags);
        uma_zfree_internal(slabzone, slab, NULL, SKIP_NONE);
}

void
uma_print_stats(void)
{
        zone_foreach(uma_print_zone);
}

static void
slab_print(uma_slab_t slab)
{
        printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
            slab->us_keg, slab->us_data, slab->us_freecount,
            slab->us_firstfree);
}

static void
cache_print(uma_cache_t cache)
{
        printf("alloc: %p(%d), free: %p(%d)\n",
            cache->uc_allocbucket,
            cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
            cache->uc_freebucket,
            cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
}

void
uma_print_zone(uma_zone_t zone)
{
        uma_cache_t cache;
        uma_keg_t keg;
        uma_slab_t slab;
        int i;

        keg = zone->uz_keg;
        printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
            zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
            keg->uk_ipers, keg->uk_ppera,
            (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
        printf("Part slabs:\n");
        LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
                slab_print(slab);
        printf("Free slabs:\n");
        LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
                slab_print(slab);
        printf("Full slabs:\n");
        LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
                slab_print(slab);
        for (i = 0; i <= mp_maxid; i++) {
                if (CPU_ABSENT(i))
                        continue;
                cache = &zone->uz_cpu[i];
                printf("CPU %d Cache:\n", i);
                cache_print(cache);
        }
}

/*