Lines Matching refs:zone

62  *	Zone-based memory allocator.  A zone is a collection of fixed size
109 * into the zone, the page is translated to use the alias address of the page
122 * zinit favors using PAGE_SIZE backing allocations for a zone unless it would
154 * We use three techniques to detect modification of a zone element
168 * (1) and (2) occur for every allocation and free to a zone.
172 * Poisoning (3) occurs periodically for every N frees (counted per-zone)
184 * of zone memory, look up the "Guard mode" zone allocator in gzalloc.c.
189 * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later
194 * To debug leaks of zone memory, use the zone leak detection tool 'zleaks'
210 * A zp_factor of 0 indicates zone poisoning is disabled,
228 * initialize zone poisoning
282 /* zp-factor=XXXX: override how often to poison freed zone elements */
298 * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so
310 * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked
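The checks sketched above combine a primary next-free pointer at the start of each free element, a backup copy stored in the element's last word (elem_size - sizeof(vm_offset_t), as used in backup_ptr_mismatch_panic below), and an optional poison fill in between. A simplified, self-contained sketch of the allocation-time verification follows; the constants and the helper name are illustrative stand-ins, not the kernel's actual cookies or routines.

    /* Illustrative only: constants and names are stand-ins for the real cookies. */
    #include <stdint.h>
    #include <stddef.h>

    #define ZP_FILL_PATTERN  0xDEADBEEFUL           /* pattern written into poisoned elements */
    #define ZP_POISON_COOKIE 0x5A5A5A5A5A5A5A5AULL  /* XORed into the backup pointer when poisoned */

    static int
    freelist_entry_looks_sane(void *element, size_t elem_size)
    {
        uintptr_t *primary = (uintptr_t *)element;                 /* next-free pointer */
        uintptr_t *backup  = (uintptr_t *)((char *)element + elem_size - sizeof(uintptr_t));
        int poisoned = (*backup == (*primary ^ ZP_POISON_COOKIE));

        /* the backup copy must match the primary pointer, directly or via the
         * cookie; anything else means one of the two copies was overwritten
         * after the element was freed */
        if (*backup != *primary && !poisoned)
            return 0;       /* the kernel calls backup_ptr_mismatch_panic() here */

        /* a poisoned element must still hold the fill pattern everywhere between
         * the two pointers, otherwise it was modified while on the free list */
        if (poisoned) {
            for (uintptr_t *p = primary + 1; p < backup; p++)
                if (*p != (uintptr_t)ZP_FILL_PATTERN)
                    return 0;
        }
        return 1;
    }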
329 * of pages being used by the zone currently. The
330 * z->page_count is protected by the zone lock.
347 /* Helpful for walking through a zone's free element list. */
357 zone_t zone;
382 is_sane_zone_ptr(zone_t zone,
394 /* Must be from zone map if the zone only uses memory from the zone_map */
396 * TODO: Remove the zone->collectable check when every
397 * zone using foreign memory is properly tagged with allows_foreign
399 if (zone->collectable && !zone->allows_foreign) {
403 * the alias address of a valid zone element.
417 /* check if addr is from zone map */
429 is_sane_zone_page_metadata(zone_t zone,
435 return is_sane_zone_ptr(zone, page_meta, sizeof(struct zone_page_metadata));
439 is_sane_zone_element(zone_t zone,
445 return is_sane_zone_ptr(zone, addr, zone->elem_size);
450 zone_element_was_modified_panic(zone_t zone,
455 panic("a freed zone element has been modified: expected %p but found %p, bits changed %p, at offset %d of %d in zone: %s",
460 (uint32_t) zone->elem_size,
461 zone->zone_name);
471 backup_ptr_mismatch_panic(zone_t zone,
478 boolean_t sane_primary = is_sane_zone_element(zone, primary);
483 sane_backup = is_sane_zone_element(zone, likely_backup);
486 sane_backup = is_sane_zone_element(zone, likely_backup);
491 zone_element_was_modified_panic(zone, primary, likely_backup, 0);
495 zone_element_was_modified_panic(zone, backup, primary,
496 zone->elem_size - sizeof(vm_offset_t));
505 zone_element_was_modified_panic(zone, primary, likely_backup, 0);
508 zone_element_was_modified_panic(zone, primary, likely_backup, 0);
518 append_zone_element(zone_t zone,
522 vm_offset_t *backup = get_backup_ptr(zone->elem_size, (vm_offset_t *) tail);
534 backup_ptr_mismatch_panic(zone,
544 * the zone free list. Every element in the list being added has already gone
550 add_list_to_zone(zone_t zone,
555 assert(!zone->use_page_list);
557 append_zone_element(zone, tail, zone->free_elements);
559 zone->free_elements = head;
564 * Adds the element to the head of the zone's free list
569 free_to_zone(zone_t zone,
576 vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
578 if (zone->use_page_list) {
580 assert(page_meta->zone == zone);
583 old_head = (vm_offset_t)zone->free_elements;
587 if (__improbable(!is_sane_zone_element(zone, old_head)))
588 panic("zfree: invalid head pointer %p for freelist of zone %s\n",
589 (void *) old_head, zone->zone_name);
592 if (__improbable(!is_sane_zone_element(zone, element)))
593 panic("zfree: freeing invalid pointer %p to zone %s\n",
594 (void *) element, zone->zone_name);
599 if (zone->elem_size <= zp_tiny_zone_limit)
601 else if (zp_factor != 0 && ++zone->zp_count >= zp_factor) {
602 /* Poison zone elements periodically */
603 zone->zp_count = 0;
626 if (zone->use_page_list) {
629 if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
633 enqueue_tail(&zone->pages.any_free_foreign, (queue_entry_t)page_meta);
640 enqueue_tail(&zone->pages.all_free, (queue_entry_t)page_meta);
644 enqueue_tail(&zone->pages.intermediate, (queue_entry_t)page_meta);
647 zone->free_elements = (struct zone_free_element *)element;
649 zone->count--;
650 zone->countfree++;
655 * Removes an element from the zone's free list, returning 0 if the free list is empty.
660 try_alloc_from_zone(zone_t zone)
665 /* if zone is empty, bail */
666 if (zone->use_page_list) {
667 if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign))
668 page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
669 else if (!queue_empty(&zone->pages.intermediate))
670 page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
671 else if (!queue_empty(&zone->pages.all_free))
672 page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
678 if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta)))
679 panic("zalloc: invalid metadata structure %p for freelist of zone %s\n",
680 (void *) page_meta, zone->zone_name);
681 assert(page_meta->zone == zone);
684 if (zone->free_elements == NULL)
687 element = (vm_offset_t)zone->free_elements;
691 if (__improbable(!is_sane_zone_element(zone, element)))
692 	panic("zalloc: invalid head pointer %p for freelist of zone %s\n",
693 (void *) element, zone->zone_name);
697 vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
706 if (__improbable(!is_sane_zone_element(zone, next_element)))
707 backup_ptr_mismatch_panic(zone, next_element, next_element_backup);
715 backup_ptr_mismatch_panic(zone, next_element, next_element_backup);
725 zone_element_was_modified_panic(zone,
731 if (zone->use_page_list) {
735 panic("zalloc: metadata located at incorrect location on page of zone %s\n",
736 zone->zone_name);
741 panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n",
742 (void *)next_element, (void *)element, zone->zone_name);
754 if (zone->use_page_list) {
759 if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
763 enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_meta);
770 enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_meta);
774 enqueue_tail(&zone->pages.intermediate, (queue_entry_t)page_meta);
777 zone->free_elements = (struct zone_free_element *)next_element;
779 zone->countfree--;
780 zone->count++;
781 zone->sum_count++;
788 * End of zone poisoning
830 * Support for garbage collection of unused zone pages
832 * The kernel virtually allocates the "zone map" submap of the kernel
833 * map. When an individual zone needs more storage, memory is allocated
834 * out of the zone map, and the two-level "zone_page_table" is
837 * to the number of zone elements that occupy the zone page (which may
838 * be a minimum of 1, including if a zone element spans multiple
841 * Asynchronously, the zone_gc() logic attempts to walk zone free
842 * lists to see if all the elements on a zone page are free. If
844 * "alloc_count", the zone page is a candidate for collection and the
846 * first word of the zone page is re-used to maintain a linked list of
847 * to-be-collected zone pages.
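A condensed sketch of the per-page accounting just described; the entry layout and names below are illustrative stand-ins for the real zone_page_table entries and zone_gc() internals.

    #include <stdint.h>

    /* Illustrative only: stand-in for a zone_page_table entry. */
    struct zone_page_entry {
        int alloc_count;     /* zone elements resident on this zone page */
        int collect_count;   /* elements found on a free list during this GC pass */
    };

    static uintptr_t zone_page_collect_list;   /* to-be-collected pages, chained through
                                                * each page's (now unused) first word */

    /* Called once per free element as zone_gc() walks a zone's free list. */
    static void
    zone_gc_mark_free_element(struct zone_page_entry *zp, uintptr_t page_addr)
    {
        if (++zp->collect_count == zp->alloc_count) {
            /* every element on this page is free, so the page is a collection
             * candidate: link it into the list through its first word */
            *(uintptr_t *)page_addr = zone_page_collect_list;
            zone_page_collect_list = page_addr;
        }
    }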
893 * This mechanism allows for bootstrapping an empty zone which is set up with
896 * This will prime the zone for the next use.
899 * looking for any zone with async_pending set and do the work for it.
902 * then zalloc_noblock to an empty zone may succeed.
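Condensed from the zalloc_canblock() fragments further below, the nonblocking path looks roughly like this; the deferred-work kick is shown as a hypothetical helper (the real test also consults no_callout, exhaustible and vm_pool_low()).

    if ((addr == 0) && !canblock && !zone->async_pending) {
        zone->async_pending = TRUE;        /* remember that this zone needs memory */
        unlock_zone(zone);
        zone_async_refill_kick();          /* hypothetical: schedules the worker that
                                            * scans for async_pending zones and
                                            * zcram()s a fresh allocation into them */
        lock_zone(zone);
        addr = try_alloc_from_zone(zone);  /* retry in case a refill already landed */
    }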
912 zone_t zone_zone = ZONE_NULL; /* the zone containing other zones */
914 zone_t zinfo_zone = ZONE_NULL; /* zone of per-task zone info */
919 * all the way through the pmap zone.
925 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
926 #define zone_sleep(zone) \
927 (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
938 #define lock_zone_init(zone) \
940 lck_attr_setdefault(&(zone)->lock_attr); \
941 lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
942 &zone_locks_grp, &(zone)->lock_attr); \
945 #define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
994 * When enabled, this code keeps a log to track allocations to a particular zone that have not
995 * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
999 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
1000 * is the name of the zone you wish to log.
1002 * This code only tracks one zone, so you need to identify which one is leaking first.
1003 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
1004 * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
1005 * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
1006 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
1013 * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
1014 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
1015 * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
1017 * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
1023 #define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */
1025 static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */
1027 /* Log allocations and frees to help debug a zone element corruption */
1047 * Each record in the log contains a pointer to the zone element it refers to,
1063 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
1066 static zone_t zone_of_interest = NULL; /* the zone being watched; corresponds to zone_name_to_log */
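As a concrete illustration of the boot-args described above (the zone name and record count are placeholders): adding

    zlog=vm.objects zrecs=8000

enables leak-style logging for the "vm objects" zone and sizes the log to roughly 8000 records (zrecs is mentioned with the other boot-args later in this file), while

    zlog=vm.objects -zc

switches to corruption-style logging that records both allocations and frees. The period in the boot-arg stands in for the space in the zone name, as explained next.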
1069 * Decide if we want to log this zone by doing a string compare between a zone name and the name
1070 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
1072 * match a space in the zone name.
1090 * in the zone name and a corresponding period in the log name.
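A small sketch of that comparison (not the kernel's exact routine): the two strings must match character for character, except that a '.' in the boot-arg name may stand for a ' ' in the zone name, since spaces cannot be passed in boot-args.

    #include <stddef.h>

    /* Illustrative stand-in for the name-matching helper described above. */
    static int
    log_name_matches_zone(const char *zone_name, const char *log_name)
    {
        size_t i;
        for (i = 0; zone_name[i] != '\0' && log_name[i] != '\0'; i++) {
            if (zone_name[i] == log_name[i])
                continue;
            if (zone_name[i] == ' ' && log_name[i] == '.')
                continue;                    /* '.' in the boot-arg matches a space */
            return 0;
        }
        return zone_name[i] == '\0' && log_name[i] == '\0';
    }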
1110 * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and
1123 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
1124 * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a
1129 * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which
1133 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
1143 vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */
1144 vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */
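Condensed from the fragments further below, the thresholds and the per-zone sample counter interact roughly as follows (field and variable names as they appear in this file):

    /* the monitor as a whole goes active once the zone map crosses
     * zleak_global_tracking_threshold; after that, a zone opts in the first
     * time its cur_size exceeds the per-zone threshold */
    if ((zleak_state & ZLEAK_STATE_ACTIVE) && !zone->zleak_on &&
        zone->cur_size > zleak_per_zone_tracking_threshold) {
        zone->zleak_on = TRUE;
    }

    /* thereafter every zleak_sample_factor-th allocation records a backtrace;
     * zleak_log() files it in the zallocations table, whose entries point
     * into the shared ztraces table */
    if (zone->zleak_on && (++zone->zleak_capture >= zleak_sample_factor)) {
        zone->zleak_capture = 0;
        /* gather zbt[] and call zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size) */
    }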
1200 * Initializes the zone leak monitor. Called from zone_init()
1212 /* -zleakoff (flag to disable zone leak monitor) */
1215 printf("zone leak detection disabled\n");
1218 printf("zone leak detection enabled\n");
1221 /* zfactor=XXXX (override how often to sample the zone allocator) */
1489 /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
1637 * zinit initializes a new zone. The zone data structures themselves
1638 * are stored in a zone, which is initially a static structure that
1646 const char *name) /* a name for the zone */
1653 z = (struct zone *)zdata;
1778 * Add the zone to the all-zones list.
1779 * If we are tracking zone info per task, and we have
1781 * using the overflow zone slot.
1796 * Check if we should be logging this zone. If so, remember the zone pointer.
1803 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
1807 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
1813 printf("zone: logging started for zone %s\n", zone_of_interest->zone_name);
1815 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
1829 * zone, such as the reserved VM map entry zone.
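For orientation, a typical client of these interfaces looks roughly like the sketch below; the struct, the sizes and the zone name are illustrative, and the argument order follows the zinit() call used for the "zones" zone later in this file (element size, maximum zone size, allocation chunk size, name).

    struct widget { int id; void *data; };     /* illustrative element type */

    static zone_t widget_zone;

    void
    widget_zone_setup(void)
    {
        widget_zone = zinit(sizeof(struct widget),          /* element size      */
                            1024 * sizeof(struct widget),   /* maximum zone size */
                            PAGE_SIZE,                      /* allocation chunk  */
                            "widgets");                     /* zone name         */
    }

    struct widget *
    widget_alloc(void)
    {
        /* zalloc() may block while the zone grows; zalloc_noblock()/zget()
         * are the nonblocking variants shown further below */
        return (struct widget *)zalloc(widget_zone);
    }

    void
    widget_free(struct widget *w)
    {
        zfree(widget_zone, w);
    }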
1912 * Cram the given memory into the specified zone.
1916 zone_t zone,
1924 assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
1925 assert(!zone->collectable || zone->allows_foreign
1928 elem_size = zone->elem_size;
1934 kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone, zone->zone_name,
1937 if (from_zm && !zone->use_page_list)
1940 lock_zone(zone);
1942 if (zone->use_page_list) {
1955 page_metadata->zone = zone;
1959 enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_metadata);
1963 zone->count++; /* compensate for free_to_zone */
1964 if ((newmem + pos_in_page) == (vm_offset_t)zone) {
1966 * special case for the "zone_zone" zone, which is using the first
1971 free_to_zone(zone, newmem + pos_in_page);
1973 zone->cur_size += elem_size;
1978 zone->count++; /* compensate for free_to_zone */
1979 if (newmem == (vm_offset_t)zone) {
1980 /* Don't free zone_zone zone */
1982 free_to_zone(zone, newmem);
1988 zone->cur_size += elem_size;
1991 unlock_zone(zone);
1996 * Steal memory for the zone package. Called from
2005 /* Request enough early memory to get to the pmap zone */
2006 zdata_size = 12 * sizeof(struct zone);
2013 * Fill a zone with enough memory to contain at least nelem elements.
2015 * Return the number of elements actually put into the zone, which may
2021 zone_t zone,
2032 size = nelem * zone->elem_size;
2038 zone_change(zone, Z_FOREIGN, TRUE);
2039 ZONE_PAGE_COUNT_INCR(zone, (size / PAGE_SIZE));
2040 zcram(zone, memory, size);
2041 nalloc = (int)(size / zone->elem_size);
2048 * Initialize the "zone of zones" which uses fixed memory allocated
2064 /* Set up zone element poisoning */
2067 /* should zlog log to debug zone corruption instead of leaks? */
2073 * Check for and set up zone leak detection if requested via boot-args. We recognize two
2079 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
2113 zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
2114 sizeof(struct zone), "zones");
2121 /* initialize fake zones and zone info if tracking by task */
2205 * Initialize the zone leak monitor
2281 * zalloc returns an element from the specified zone.
2285 zone_t zone,
2290 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
2302 assert(zone != ZONE_NULL);
2305 addr = gzalloc_alloc(zone, canblock);
2310 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2312 if (__improbable(DO_LOGGING(zone)))
2315 lock_zone(zone);
2321 * allocations in this zone.
2323 if (zone->zleak_on && (++zone->zleak_capture >= zleak_sample_factor)) {
2324 zone->zleak_capture = 0;
2326 /* Avoid backtracing twice if zone logging is on */
2334 if (zone->async_prio_refill && zone->zone_replenish_thread) {
2336 vm_size_t zfreec = (zone->cur_size - (zone->count * zone->elem_size));
2337 vm_size_t zrefillwm = zone->prio_refill_watermark * zone->elem_size;
2343 unlock_zone(zone);
2347 thread_wakeup(&zone->zone_replenish_thread);
2357 assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC);
2360 lock_zone(zone);
2366 addr = try_alloc_from_zone(zone);
2373 if (zone->doing_alloc) {
2375 * Someone is allocating memory for this zone.
2378 zone->waiting = TRUE;
2379 zone_sleep(zone);
2380 } else if (zone->doing_gc) {
2387 zone->waiting = TRUE;
2388 zone_sleep(zone);
2394 if ((zone->cur_size + zone->elem_size) >
2395 zone->max_size) {
2396 if (zone->exhaustible)
2398 if (zone->expandable) {
2409 zone->max_size += (zone->max_size >> 1);
2411 unlock_zone(zone);
2418 panic("zalloc: zone \"%s\" empty.", zone->zone_name);
2421 zone->doing_alloc = TRUE;
2422 unlock_zone(zone);
2429 round_page(zone->elem_size);
2431 alloc_size = zone->alloc_size;
2433 if (zone->noencrypt)
2450 printf("Failed to activate live zone leak debugging (%d).\n", kr);
2455 if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
2456 if (zone->cur_size > zleak_per_zone_tracking_threshold) {
2457 zone->zleak_on = TRUE;
2461 ZONE_PAGE_COUNT_INCR(zone, (alloc_size / PAGE_SIZE));
2462 zcram(zone, space, alloc_size);
2482 panic("zalloc: zone map exhausted while allocating from zone %s, likely due to memory leak in zone %s (%lu total bytes, %d elements allocated)",
2483 zone->zone_name, zone_largest->zone_name,
2487 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
2493 lock_zone(zone);
2494 zone->doing_alloc = FALSE;
2495 if (zone->waiting) {
2496 zone->waiting = FALSE;
2497 zone_wakeup(zone);
2499 addr = try_alloc_from_zone(zone);
2502 unlock_zone(zone);
2505 lock_zone(zone);
2509 addr = try_alloc_from_zone(zone);
2517 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2518 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
2520 zone->zleak_capture = zleak_sample_factor;
2526 if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
2527 zone->async_pending = TRUE;
2528 unlock_zone(zone);
2530 lock_zone(zone);
2531 addr = try_alloc_from_zone(zone);
2535 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
2536 * suspected, so this code rarely executes. We need to run this code while still holding the zone lock
2540 if (__improbable(DO_LOGGING(zone) && addr)) {
2545 if (!did_gzalloc && addr && zone_debug_enabled(zone)) {
2546 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
2551 unlock_zone(zone);
2553 TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
2558 vm_size_t sz = zone->elem_size;
2560 if (zone->caller_acct)
2566 OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].alloc);
2574 register zone_t zone)
2576 return( zalloc_canblock(zone, TRUE) );
2581 register zone_t zone)
2583 return( zalloc_canblock(zone, FALSE) );
2624 * zget returns an element from the specified zone
2636 register zone_t zone)
2641 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used for zone leak detection */
2645 assert( zone != ZONE_NULL );
2647 if (!lock_try_zone(zone))
2654 if (zone->zleak_on && (++zone->zleak_capture >= zleak_sample_factor)) {
2655 zone->zleak_capture = 0;
2660 addr = try_alloc_from_zone(zone);
2662 if (addr && zone_debug_enabled(zone)) {
2663 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
2672 if (zone->zleak_on && zleak_tracedepth > 0 && addr) {
2673 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2674 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
2676 zone->zleak_capture = zleak_sample_factor;
2681 unlock_zone(zone);
2690 static void zone_check_freelist(zone_t zone, vm_offset_t elem)
2695 if (zone->use_page_list) {
2696 if (zone->allows_foreign) {
2697 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
2698 !queue_end(&zone->pages.any_free_foreign, (queue_entry_t)thispage);
2703 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2708 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
2709 !queue_end(&zone->pages.all_free, (queue_entry_t)thispage);
2714 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2718 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
2719 !queue_end(&zone->pages.intermediate, (queue_entry_t)thispage);
2724 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2729 for (this = zone->free_elements;
2732 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2743 register zone_t zone,
2747 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
2751 assert(zone != ZONE_NULL);
2754 if (zone->use_page_list) {
2756 if (zone != page_meta->zone) {
2759 * a different zone (or maybe it's from a zone that doesn't use page free lists at all). We can repair
2761 * 1) The specified zone had use_page_list, and the true zone also has use_page_list set. In that case
2763 * 2) The specified zone had use_page_list, but the true zone does not. In this case page_meta is garbage,
2764 * and dereferencing page_meta->zone might panic.
2765 * To distinguish the two, we enumerate the zone list to match it up.
2766 * We do not handle the case where an incorrect zone is passed that does not have use_page_list set,
2767 * even if the true zone did have this set.
2778 if (fixed_zone == page_meta->zone && fixed_zone->use_page_list) {
2780 printf("Fixing incorrect zfree from zone %s to zone %s\n", zone->zone_name, fixed_zone->zone_name);
2781 zone = fixed_zone;
2790 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2793 if (__improbable(DO_LOGGING(zone) && corruption_debug_flag))
2798 if (zone == ZONE_NULL || elem == (vm_offset_t)0)
2801 if (zone == zone_zone)
2806 gzfreed = gzalloc_free(zone, addr);
2809 TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);
2811 if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
2812 !from_zone_map(elem, zone->elem_size))) {
2814 panic("zfree: non-allocated memory in collectable zone!");
2816 zone_last_bogus_zone = zone;
2821 lock_zone(zone);
2824 * See if we're doing logging on this zone. There are two styles of logging used depending on
2828 if (__improbable(DO_LOGGING(zone))) {
2847 if (!gzfreed && zone_debug_enabled(zone)) {
2852 /* check the zone's consistency */
2854 for (tmp_elem = queue_first(&zone->active_zones);
2855 !queue_end(tmp_elem, &zone->active_zones);
2860 panic("zfree()ing element from wrong zone");
2866 zone_check_freelist(zone, elem);
2870 free_to_zone(zone, elem);
2873 if (zone->count < 0)
2874 panic("zfree: zone count underflow in zone %s while freeing element %p, possible cause: double frees or freeing memory that did not come from this zone",
2875 zone->zone_name, addr);
2883 if (zone->zleak_on) {
2884 zleak_free(elem, zone->elem_size);
2890 * request to run the garbage collection in the zone the next
2893 if (zone->elem_size >= PAGE_SIZE &&
2897 unlock_zone(zone);
2903 vm_size_t sz = zone->elem_size;
2905 if (zone->caller_acct)
2911 OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].free);
2916 /* Change a zone's flags.
2921 zone_t zone,
2925 assert( zone != ZONE_NULL );
2930 zone->noencrypt = value;
2933 zone->exhaustible = value;
2936 zone->collectable = value;
2939 zone->expandable = value;
2942 zone->allows_foreign = value;
2945 zone->caller_acct = value;
2948 zone->no_callout = value;
2951 zone->gzalloc_exempt = value;
2953 gzalloc_reconfigure(zone);
2957 zone->alignment_required = value;
2959 zone_debug_disable(zone);
2962 gzalloc_reconfigure(zone);
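Clients toggle these per-zone flags through zone_change(); the only flag name visible in this listing is Z_FOREIGN (used by zfill() above), so the other names in this sketch are assumptions mapped onto the fields assigned in the switch.

    zone_change(my_zone, Z_FOREIGN, TRUE);   /* accept memory from outside the zone map
                                              * (sets allows_foreign, as in zfill()) */
    zone_change(my_zone, Z_EXPAND,  TRUE);   /* assumed name: sets expandable */
    zone_change(my_zone, Z_EXHAUST, FALSE);  /* assumed name: sets exhaustible */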
2972 * Return the expected number of free elements in the zone.
2975 * into a zone is by zcram.
2979 zone_free_count(zone_t zone)
2983 lock_zone(zone);
2984 free_count = zone->countfree;
2985 unlock_zone(zone);
3244 * while holding a zone lock since it's taken
3279 * Do a quick feasibility check before we scan the zone:
3283 * the zone has more than 10 percent of its elements free
3297 * Snatch all of the free elements away from the zone.
3368 kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed/elt_size, total_freed_pages);
3371 continue; /* go to next zone */
3379 * are returned to the zone.
3415 * back to the freelist so that others waiting on the zone don't get stuck
3523 * and update the zone size info.
3550 * the zone size info.
3643 kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed/elt_size, total_freed_pages);
3696 * By default, don't attempt zone GC more frequently
3767 struct zone zcopy;
3944 struct zone zcopy;
4028 * host_zone_info - LEGACY user interface for Mach zone information
4102 struct zone zcopy;