Lines Matching refs:zone

62  *	Zone-based memory allocator.  A zone is a collection of fixed size
105 * We use three methods to detect use of a zone element after it has been freed. These
106 * checks are enabled for every N'th element (counted per-zone) by specifying
130 * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later in this
153 * Frees the specified element, which is within the specified zone. If this
160 free_to_zone(zone_t zone, void *elem) {
166 zone->free_check_count++ % free_check_sample_factor == 0 &&
167 zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
168 zone->free_check_count = 1;
170 for (i++; i < zone->elem_size / sizeof(uint32_t); i++) {
173 ((vm_offset_t *) elem)[((zone->elem_size)/sizeof(vm_offset_t))-1] = zone->free_elements;
178 /* maintain free list and decrement number of active objects in zone */
179 ((vm_offset_t *) elem)[0] = zone->free_elements;
180 zone->free_elements = (vm_offset_t) elem;
181 zone->count--;
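
Only the matching lines of free_to_zone() appear above. A minimal sketch of the whole free path, with the elided lines filled in as assumptions: the word just past the free-list link carries a ZP_POISON/ZP_NOT_POISONED tag, consistent with the panic strings quoted further down.

    static inline void
    free_to_zone(zone_t zone, void *elem) {
        /* index of the first uint32_t beyond the free-list link word */
        unsigned int i = sizeof(vm_offset_t) / sizeof(uint32_t);

        /* poison every free_check_sample_factor'th freed element */
        if (free_check_sample_factor != 0 &&
            zone->free_check_count++ % free_check_sample_factor == 0 &&
            zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
            zone->free_check_count = 1;
            ((uint32_t *) elem)[i] = ZP_POISON;
            for (i++; i < zone->elem_size / sizeof(uint32_t); i++) {
                ((uint32_t *) elem)[i] = ZP_POISON;
            }
            /* duplicate the free-list link in the last word as a check value */
            ((vm_offset_t *) elem)[((zone->elem_size)/sizeof(vm_offset_t))-1] = zone->free_elements;
        } else {
            ((uint32_t *) elem)[i] = ZP_NOT_POISONED;
        }

        /* maintain free list and decrement number of active objects in zone */
        ((vm_offset_t *) elem)[0] = zone->free_elements;
        zone->free_elements = (vm_offset_t) elem;
        zone->count--;
    }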
185 * Allocates an element from the specified zone, storing its address in the
186 * return arg. This function will look for corruptions revealed through zone
190 alloc_from_zone(zone_t zone, void **ret) {
191 void *elem = (void *) zone->free_elements;
198 zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
202 ((vm_offset_t *) elem)[(zone->elem_size/sizeof(vm_offset_t))-1]) {
203 panic("a freed zone element has been modified in zone: %s (0x%08x)",
204 zone->zone_name, ((uint32_t *) elem)[i]);
209 i < zone->elem_size / sizeof(uint32_t) -
213 panic("a freed zone element has been modified in zone: %s, element is %08x but expected %08x (element: %p)",
214 zone->zone_name, ((uint32_t *) elem)[i], ZP_POISON, elem);
218 panic("a freed zone element has been modified in zone: %s, element is %08x but expected %08x (element: %p)",
219 zone->zone_name, ((uint32_t *) elem)[i], ZP_NOT_POISONED, elem);
222 zone->count++;
223 zone->sum_count++;
224 zone->free_elements = ((vm_offset_t *) elem)[0];
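
The allocation side verifies those markers before unlinking the element. A condensed sketch, again with the elided lines reconstructed as assumptions (the real routine also sanity-checks the link with an is_kernel_data_addr()-style test, omitted here):

    static inline void
    alloc_from_zone(zone_t zone, void **ret) {
        void *elem = (void *) zone->free_elements;

        if (elem != NULL &&
            zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
            unsigned int i = sizeof(vm_offset_t) / sizeof(uint32_t);

            switch (((uint32_t *) elem)[i]) {
            case ZP_POISON:
                /* the last word must still mirror the free-list link */
                if (((vm_offset_t *) elem)[0] !=
                    ((vm_offset_t *) elem)[(zone->elem_size/sizeof(vm_offset_t))-1])
                    panic("a freed zone element has been modified in zone: %s (0x%08x)",
                          zone->zone_name, ((uint32_t *) elem)[i]);
                /* every poisoned word up to the check value must be intact */
                for (i++;
                     i < zone->elem_size / sizeof(uint32_t) -
                         sizeof(vm_offset_t) / sizeof(uint32_t);
                     i++)
                    if (((uint32_t *) elem)[i] != ZP_POISON)
                        panic("a freed zone element has been modified in zone: %s, element is %08x but expected %08x (element: %p)",
                              zone->zone_name, ((uint32_t *) elem)[i], ZP_POISON, elem);
                break;
            case ZP_NOT_POISONED:
                break;
            default:
                panic("a freed zone element has been modified in zone: %s, element is %08x but expected %08x (element: %p)",
                      zone->zone_name, ((uint32_t *) elem)[i], ZP_NOT_POISONED, elem);
            }
        }
        if (elem != NULL) {
            zone->count++;       /* element becomes live */
            zone->sum_count++;   /* lifetime allocation counter */
            zone->free_elements = ((vm_offset_t *) elem)[0];
        }
        *ret = elem;
    }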
269 * Support for garbage collection of unused zone pages
271 * The kernel virtually allocates the "zone map" submap of the kernel
272 * map. When an individual zone needs more storage, memory is allocated
273 * out of the zone map, and the two-level "zone_page_table" is
276 * to the number of zone elements that occupy the zone page (which may
277 * be as low as 1, e.g. when a single zone element spans multiple
280 * Asynchronously, the zone_gc() logic attempts to walk zone free
281 * lists to see if all the elements on a zone page are free. If
283 * "alloc_count", the zone page is a candidate for collection and the
285 * first word of the zone page is re-used to maintain a linked list of
286 * to-be-collected zone pages.
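
The per-page bookkeeping this describes can be pictured as a small entry in the two-level table. A sketch; collect_count is assumed here as the companion counter that zone_gc() increments while walking the free lists:

    struct zone_page_table_entry {
        volatile uint16_t alloc_count;    /* elements resident on this page */
        volatile uint16_t collect_count;  /* free elements found during a GC scan */
    };
    /* a page is collectable when collect_count == alloc_count */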
334 zone_t zone_zone = ZONE_NULL; /* the zone containing other zones */
336 zone_t zinfo_zone = ZONE_NULL; /* zone of per-task zone info */
341 * all the way through the pmap zone.
347 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
348 #define zone_sleep(zone) \
349 (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
352 #define lock_zone_init(zone) \
355 (void) snprintf(_name, sizeof (_name), "zone.%s", (zone)->zone_name); \
356 lck_grp_attr_setdefault(&(zone)->lock_grp_attr); \
357 lck_grp_init(&(zone)->lock_grp, _name, &(zone)->lock_grp_attr); \
358 lck_attr_setdefault(&(zone)->lock_attr); \
359 lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
360 &(zone)->lock_grp, &(zone)->lock_attr); \
363 #define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
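
A sketch of the locking discipline these macros imply, assuming lock_zone()/unlock_zone() take and drop the same per-zone mutex that lock_try_zone() tries (this is the pattern visible in the zalloc_canblock() fragments below):

    lock_zone(zone);
    while (zone->doing_alloc || zone->doing_gc) {
        zone->waiting = TRUE;
        zone_sleep(zone);      /* lck_mtx_sleep: drops and retakes zone->lock */
    }
    /* ... mutate free_elements, count, cur_size ... */
    if (zone->waiting) {
        zone->waiting = FALSE;
        zone_wakeup(zone);     /* wake threads parked in zone_sleep() */
    }
    unlock_zone(zone);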
420 * When enabled, this code keeps a log to track allocations to a particular zone that have not
421 * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
425 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
426 * is the name of the zone you wish to log.
428 * This code only tracks one zone, so you need to identify which one is leaking first.
429 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
430 * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
431 * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
432 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
439 * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
440 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
441 * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
443 * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
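
For example (the zone name here is hypothetical, and zrecs sizes the log, per the zone_init() comment further down; a period in the boot-arg stands for a space in the zone name, as noted below):

    zlog=vm.objects zrecs=8000          leak style: log outstanding allocations only
    zlog=vm.objects zrecs=8000 -zc      corruption style: log both allocations and frees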
449 #define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */
451 static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */
470 * Each record in the log contains a pointer to the zone element it refers to, a "time" number that allows
492 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
499 static zone_t zone_of_interest = NULL; /* the zone being watched; corresponds to zone_name_to_log */
502 * Decide if we want to log this zone by doing a string compare between a zone name and the name
503 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
505 * match a space in the zone name.
523 * in the zone name and a corresponding period in the log name.
543 * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and
556 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
557 * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a
562 * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which
566 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
576 vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */
577 vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */
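
The sampling gate itself is cheap, one increment and one modulo per allocation, as the zalloc_canblock()/zget() fragments below show. A sketch, assuming a fastbacktrace()-style helper captures the stack:

    if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
        zone->zleak_capture = 1;
        zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
        /* zleak_log() later pairs zbt with the returned address */
    }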
633 * Initializes the zone leak monitor. Called from zone_init()
648 printf("zone leak detection enabled\n");
651 printf("zone leak detection disabled\n");
654 /* -zleakoff (flag to disable zone leak monitor) */
657 printf("zone leak detection disabled\n");
660 printf("zone leak detection enabled\n");
664 /* zfactor=XXXX (override how often to sample the zone allocator) */
932 /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
1078 * zinit initializes a new zone. The zone data structures themselves
1079 * are stored in a zone, which is initially a static structure that
1087 const char *name) /* a name for the zone */
1093 z = (struct zone *)zdata;
1192 * Add the zone to the all-zones list.
1193 * If we are tracking zone info per task, and we have
1195 * using the overflow zone slot.
1211 * Check if we should be logging this zone. If so, remember the zone pointer.
1218 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
1222 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
1230 * At this point, everything is set up and we're ready to start logging this zone.
1234 printf("zone: logging started for zone %s (%p)\n", zone_of_interest->zone_name, zone_of_interest);
1237 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
1251 * zone, such as the reserved VM map entry zone.
1328 * Cram the given memory into the specified zone.
1332 zone_t zone,
1340 assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
1341 assert(!zone->collectable || zone->allows_foreign
1344 elem_size = zone->elem_size;
1352 lock_zone(zone);
1354 free_to_zone(zone, (void *) newmem);
1357 zone->count++; /* compensate for free_to_zone */
1360 zone->cur_size += elem_size;
1362 unlock_zone(zone);
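
Joining the matched zcram() lines into a sketch; the zone_page_init()/zone_page_alloc() bookkeeping for zone-map pages is an assumption based on the GC description above:

    void
    zcram(zone_t zone, vm_offset_t newmem, vm_size_t size)
    {
        vm_size_t  elem_size;
        boolean_t  from_zm = from_zone_map(newmem, size);

        assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
        assert(!zone->collectable || zone->allows_foreign || from_zm);

        elem_size = zone->elem_size;
        if (from_zm)
            zone_page_init(newmem, size);       /* expand the zone_page_table */

        lock_zone(zone);
        while (size >= elem_size) {
            free_to_zone(zone, (void *) newmem);
            if (from_zm)
                zone_page_alloc(newmem, elem_size); /* bump the page's alloc_count */
            zone->count++;      /* compensate for free_to_zone */
            size -= elem_size;
            newmem += elem_size;
            zone->cur_size += elem_size;
        }
        unlock_zone(zone);
    }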
1367 * Steal memory for the zone package. Called from
1376 /* Request enough early memory to get to the pmap zone */
1377 zdata_size = 12 * sizeof(struct zone);
1383 * Fill a zone with enough memory to contain at least nelem elements.
1385 * Return the number of elements actually put into the zone, which may
1391 zone_t zone,
1402 size = nelem * zone->elem_size;
1408 zone_change(zone, Z_FOREIGN, TRUE);
1409 zcram(zone, memory, size);
1410 nalloc = (int)(size / zone->elem_size);
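
The body of zfill() these lines imply, assuming kmem_alloc_kobject() supplies the page-rounded backing memory:

    size = nelem * zone->elem_size;
    size = round_page(size);
    kr = kmem_alloc_kobject(kernel_map, &memory, size);
    if (kr != KERN_SUCCESS)
        return 0;                        /* nothing added to the zone */

    zone_change(zone, Z_FOREIGN, TRUE);  /* the pages live outside the zone map */
    zcram(zone, memory, size);
    nalloc = (int)(size / zone->elem_size);
    return nalloc;                       /* may exceed nelem after page rounding */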
1417 * Initialize the "zone of zones" which uses fixed memory allocated
1442 /* Set up zone poisoning */
1446 /* support for old zone poisoning boot-args */
1454 /* zp-factor=XXXX (override how often to poison freed zone elements) */
1460 * Check for and set up zone leak detection if requested via boot-args. We recognize two
1466 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
1494 zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
1495 sizeof(struct zone), "zones");
1502 /* initialize fake zones and zone info if tracking by task */
1583 * Initialize the zone leak monitor
1657 * zalloc returns an element from the specified zone.
1661 register zone_t zone,
1666 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
1677 assert(zone != ZONE_NULL);
1680 addr = gzalloc_alloc(zone, canblock);
1684 lock_zone(zone);
1687 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
1690 if (DO_LOGGING(zone))
1696 * allocations in this zone.
1698 if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
1699 zone->zleak_capture = 1;
1701 /* Avoid backtracing twice if zone logging is on */
1710 alloc_from_zone(zone, (void **) &addr);
1712 if (zone->async_prio_refill &&
1713 ((zone->cur_size - (zone->count * zone->elem_size)) <
1714 (zone->prio_refill_watermark * zone->elem_size))) {
1723 if (zone->doing_alloc) {
1725 * Someone is allocating memory for this zone.
1728 zone->waiting = TRUE;
1729 zone_sleep(zone);
1730 } else if (zone->doing_gc) {
1737 zone->waiting = TRUE;
1738 zone_sleep(zone);
1744 if ((zone->cur_size + zone->elem_size) >
1745 zone->max_size) {
1746 if (zone->exhaustible)
1748 if (zone->expandable) {
1759 zone->max_size += (zone->max_size >> 1);
1761 unlock_zone(zone);
1768 panic("zalloc: zone \"%s\" empty.", zone->zone_name);
1771 zone->doing_alloc = TRUE;
1772 unlock_zone(zone);
1779 round_page(zone->elem_size);
1781 alloc_size = zone->alloc_size;
1783 if (zone->noencrypt)
1800 printf("Failed to activate live zone leak debugging (%d).\n", kr);
1805 if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
1806 if (zone->cur_size > zleak_per_zone_tracking_threshold) {
1807 zone->zleak_on = TRUE;
1812 zcram(zone, space, alloc_size);
1833 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
1839 lock_zone(zone);
1840 zone->doing_alloc = FALSE;
1841 if (zone->waiting) {
1842 zone->waiting = FALSE;
1843 zone_wakeup(zone);
1845 alloc_from_zone(zone, (void **) &addr);
1848 unlock_zone(zone);
1851 lock_zone(zone);
1855 alloc_from_zone(zone, (void **) &addr);
1863 /* Sampling can fail if another sample is happening at the same time in a different zone. */
1864 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
1866 zone->zleak_capture = zleak_sample_factor;
1873 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
1874 * suspected, so this code rarely executes. We need to do this code while still holding the zone lock
1878 if (DO_LOGGING(zone) && addr) {
1882 * depending on whether we're looking for the source of a zone leak or a zone corruption. When looking
1886 * the allocations and frees done in the zone so that the history of operations for a specific zone
1942 if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
1943 zone->async_pending = TRUE;
1944 unlock_zone(zone);
1945 thread_call_enter(&zone->call_async_alloc);
1946 lock_zone(zone);
1947 alloc_from_zone(zone, (void **) &addr);
1951 if (!did_gzalloc && addr && zone_debug_enabled(zone)) {
1952 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
1959 zone->num_allocs++;
1963 unlock_zone(zone);
1966 thread_wakeup(&zone->zone_replenish_thread);
1968 TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
1974 vm_size_t sz = zone->elem_size;
1976 if (zone->caller_acct)
1982 OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].alloc);
1990 register zone_t zone)
1992 return( zalloc_canblock(zone, TRUE) );
1997 register zone_t zone)
1999 return( zalloc_canblock(zone, FALSE) );
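
A hypothetical client, pulling zinit()/zalloc()/zfree() together; names and sizes are illustrative only:

    static zone_t widget_zone;  /* hypothetical */

    void
    widget_init(void)
    {
        widget_zone = zinit(sizeof(struct widget),          /* element size */
                            1024 * sizeof(struct widget),   /* max zone size */
                            PAGE_SIZE,                      /* growth quantum */
                            "widgets");                     /* name, usable with zlog= */
    }

    struct widget *
    widget_alloc(void)
    {
        /* zalloc() may block while the zone grows; use zalloc_noblock()
         * (or zget(), below) from contexts that must not sleep */
        return (struct widget *) zalloc(widget_zone);
    }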
2017 * zget returns an element from the specified zone
2029 register zone_t zone)
2034 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used for zone leak detection */
2038 assert( zone != ZONE_NULL );
2040 if (!lock_try_zone(zone))
2047 if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
2048 zone->zleak_capture = 1;
2053 alloc_from_zone(zone, (void **) &addr);
2055 if (addr && zone_debug_enabled(zone)) {
2056 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
2065 if (zone->zleak_on && zleak_tracedepth > 0 && addr) {
2066 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2067 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
2069 zone->zleak_capture = zleak_sample_factor;
2074 zone->num_allocs++;
2078 unlock_zone(zone);
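
zget() is the try-lock variant; it never blocks and never grows the zone, so callers must tolerate failure. A sketch, reusing the hypothetical widget_zone from above:

    struct widget *w = (struct widget *) zget(widget_zone);
    if (w == NULL) {
        /* zone lock was contended or the zone was empty;
         * fall back to another allocator or retry later */
    }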
2092 register zone_t zone,
2096 void *zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
2100 assert(zone != ZONE_NULL);
2103 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2106 if (DO_LOGGING(zone))
2111 if (zone == ZONE_NULL || elem == (vm_offset_t)0)
2114 if (zone == zone_zone)
2119 gzfreed = gzalloc_free(zone, addr);
2122 TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);
2124 if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
2125 !from_zone_map(elem, zone->elem_size))) {
2127 panic("zfree: non-allocated memory in collectable zone!");
2129 zone_last_bogus_zone = zone;
2134 lock_zone(zone);
2137 * See if we're doing logging on this zone. There are two styles of logging used depending on
2141 if (DO_LOGGING(zone)) {
2191 if (!gzfreed && zone_debug_enabled(zone)) {
2196 /* check the zone's consistency */
2198 for (tmp_elem = queue_first(&zone->active_zones);
2199 !queue_end(tmp_elem, &zone->active_zones);
2204 panic("zfree()ing element from wrong zone");
2212 /* check the zone's consistency */
2214 for (this = zone->free_elements;
2222 free_to_zone(zone, (void *) elem);
2225 if (zone->count < 0)
2231 zone->num_frees++;
2236 if (zone->zleak_on) {
2237 zleak_free(elem, zone->elem_size);
2243 * request to run the garbage collection in the zone the next
2246 if (zone->elem_size >= PAGE_SIZE &&
2250 unlock_zone(zone);
2256 vm_size_t sz = zone->elem_size;
2258 if (zone->caller_acct)
2264 OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].free);
2269 /* Change a zone's flags.
2274 zone_t zone,
2278 assert( zone != ZONE_NULL );
2283 zone->noencrypt = value;
2286 zone->exhaustible = value;
2289 zone->collectable = value;
2292 zone->expandable = value;
2295 zone->allows_foreign = value;
2298 zone->caller_acct = value;
2301 zone->no_callout = value;
2304 zone->gzalloc_exempt = value;
2306 gzalloc_reconfigure(zone);
2310 zone->alignment_required = value;
2312 zone_debug_disable(zone);
2315 gzalloc_reconfigure(zone);
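
Typical zone_change() tuning, issued right after zinit(). Only Z_FOREIGN appears in the matches above; the other flag names are assumed here to follow the zone field names in the case bodies:

    zone_change(widget_zone, Z_NOENCRYPT, TRUE);  /* skip hibernation encryption */
    zone_change(widget_zone, Z_EXHAUST, TRUE);    /* fail instead of growing */
    zone_change(widget_zone, Z_COLLECT, FALSE);   /* exempt from zone_gc() */
    zone_change(widget_zone, Z_FOREIGN, TRUE);    /* accept pages outside the zone map */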
2325 * Return the expected number of free elements in the zone.
2328 * into a zone is by zcram.
2332 zone_free_count(zone_t zone)
2336 lock_zone(zone);
2337 free_count = (integer_t)(zone->cur_size/zone->elem_size - zone->count);
2338 unlock_zone(zone);
2545 /* This is used for walking through a zone's free element list.
2552 * Add a linked list of pages starting at base back into the zone
2555 #define ADD_LIST_TO_ZONE(zone, base, tail) \
2557 (tail)->next = (void *)((zone)->free_elements); \
2558 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
2559 ((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
2560 (zone)->free_elements; \
2562 (zone)->free_elements = (unsigned long)(base); \
2568 #define ADD_ELEMENT(zone, prev, elem) \
2571 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
2572 ((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
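
A possible completion of ADD_ELEMENT(), mirroring ADD_LIST_TO_ZONE() above: besides linking prev to elem, it refreshes the check value in prev's last word so the free-list poison checks stay consistent. MACRO_BEGIN/MACRO_END bracketing is assumed:

    #define ADD_ELEMENT(zone, prev, elem)                                   \
    MACRO_BEGIN                                                             \
            (prev)->next = (elem);                                          \
            if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
                    ((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
                            (vm_offset_t)(elem);                            \
            }                                                               \
    MACRO_END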
2612 * while holding a zone lock since it's taken
2645 * Do a quick feasibility check before we scan the zone:
2649 * the zone has more than 10 percent of its elements free
2662 * Snatch all of the free elements away from the zone.
2675 * are returned to the zone.
2810 * and update the zone size info.
2836 * the zone size info.
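
The "quick feasibility check" reads roughly like this sketch; the thresholds are reconstructed from the comment (skip the scan unless at least an allocation block's worth, and more than 10 percent, of the zone is free, unless elements tile whole pages):

    elt_size  = z->elem_size;
    free_size = z->cur_size - z->count * elt_size;  /* bytes on the free list */

    if ((elt_size & PAGE_MASK) &&                   /* elements don't tile pages */
        (free_size <= 2 * z->alloc_size ||          /* under one allocation block spare */
         free_size <= z->cur_size / 10)) {          /* under 10% of the zone free */
        z->doing_gc = FALSE;
        unlock_zone(z);
        continue;                                   /* skip scanning this zone */
    }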
2954 * By default, don't attempt zone GC more frequently
3025 struct zone zcopy;
3202 struct zone zcopy;
3286 * host_zone_info - LEGACY user interface for Mach zone information
3360 struct zone zcopy;