/macosx-10.5.8/xnu-1228.15.4/osfmk/kern/

Lines Matching refs:zone

62  *	Zone-based memory allocator.  A zone is a collection of fixed size
100 /* for fake zone stat routines */
109 * We provide three methods to detect use of a zone element after it's been freed. These
114 * each other when re-using the zone element, to detect modifications.
130 #define ADD_TO_ZONE(zone, element) \
135 i < zone->elem_size/sizeof(uint32_t); \
139 *((vm_offset_t *)(element)) = (zone)->free_elements; \
141 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
142 ((vm_offset_t *)(element))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
143 (zone)->free_elements; \
145 (zone)->free_elements = (vm_offset_t) (element); \
146 (zone)->count--; \
149 #define REMOVE_FROM_ZONE(zone, ret, type) \
151 (ret) = (type) (zone)->free_elements; \
155 ((zone)->elem_size >= (2 * sizeof(vm_offset_t)) && \
156 ((vm_offset_t *)(ret))[((zone)->elem_size/sizeof(vm_offset_t))-1] != \
158 panic("a freed zone element has been modified");\
162 ii < zone->elem_size/sizeof(uint32_t) - sizeof(vm_offset_t) / sizeof(uint32_t); \
165 panic("a freed zone element has been modified");\
168 (zone)->count++; \
169 (zone)->free_elements = *((vm_offset_t *)(ret)); \
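
The two macros above implement the freed-element checks described at line 109: the free list is threaded through the first word of each freed element, a mirror of that pointer is kept in the element's last word, and (with poisoning enabled) the words in between are filled with a known pattern that REMOVE_FROM_ZONE re-verifies before handing the element out again. A minimal user-space sketch of the same technique, with hypothetical names and a made-up poison value, not the kernel's actual macros:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define POISON ((uintptr_t)0xdeadbeef)   /* hypothetical fill pattern */

    struct fake_zone {
        size_t elem_size;        /* bytes per element */
        void  *free_elements;    /* head of the intrusive free list */
    };

    /* Free: link in at the head, mirror the link in the last word,
     * poison everything in between. */
    static void fake_free(struct fake_zone *z, void *elem)
    {
        uintptr_t *w = elem;
        size_t n = z->elem_size / sizeof(uintptr_t);
        for (size_t i = 1; i + 1 < n; i++)
            w[i] = POISON;
        w[0] = (uintptr_t)z->free_elements;      /* primary next pointer */
        w[n - 1] = (uintptr_t)z->free_elements;  /* backup copy of it */
        z->free_elements = elem;
    }

    /* Allocate: pop the head, then verify that neither the backup pointer
     * nor the poison was overwritten while the element sat on the free list. */
    static void *fake_alloc(struct fake_zone *z)
    {
        uintptr_t *w = z->free_elements;
        if (w == NULL)
            return NULL;
        size_t n = z->elem_size / sizeof(uintptr_t);
        if (n >= 2 && w[n - 1] != w[0]) {
            fputs("a freed zone element has been modified\n", stderr);
            abort();                             /* the kernel panics here */
        }
        for (size_t i = 1; i + 1 < n; i++)
            if (w[i] != POISON) {
                fputs("a freed zone element has been modified\n", stderr);
                abort();
            }
        z->free_elements = (void *)w[0];
        return w;
    }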
180 * Support for garbage collection of unused zone pages:
229 zone_t zone_zone = ZONE_NULL; /* the zone containing other zones */
239 #define lock_zone(zone) \
241 lck_mtx_lock(&(zone)->lock); \
244 #define unlock_zone(zone) \
246 lck_mtx_unlock(&(zone)->lock); \
249 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
250 #define zone_sleep(zone) \
251 (void) lck_mtx_sleep(&(zone)->lock, 0, (event_t)(zone), THREAD_UNINT);
254 #define lock_zone_init(zone) \
257 (void) snprintf(_name, sizeof (_name), "zone.%s", (zone)->zone_name); \
258 lck_grp_attr_setdefault(&(zone)->lock_grp_attr); \
259 lck_grp_init(&(zone)->lock_grp, _name, &(zone)->lock_grp_attr); \
260 lck_attr_setdefault(&(zone)->lock_attr); \
261 lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
262 &(zone)->lock_grp, &(zone)->lock_attr); \
265 #define lock_try_zone(zone) lck_mtx_try_lock(&zone->lock)
321 * When enabled, this code keeps a log to track allocations to a particular zone that have not
322 * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
326 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
327 * is the name of the zone you wish to log.
329 * This code only tracks one zone, so you need to identify which one is leaking first.
330 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
331 * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
332 * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
333 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
340 * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
341 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
342 * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
344 * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
350 #define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */
352 static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */
362 * MAX_DEPTH configures how deep a stack trace is taken on each zalloc in the zone of interest. 15
372 * Each record in the log contains a pointer to the zone element it refers to, a "time" number that allows
394 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
401 static zone_t zone_of_interest = NULL; /* the zone being watched; corresponds to zone_name_to_log */
404 * Decide if we want to log this zone by doing a string compare between a zone name and the name
405 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
407 * match a space in the zone name.
425 * in the zone name and a corresponding period in the log name.
445 * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and
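
In practice, the logging described above is driven entirely from boot-args (lines 326-342): zlog=<zone name> selects the zone to watch, zrecs=<n> sizes the log (line 787), and adding -zc switches to tracking both allocations and frees for corruption hunting. For example, a boot-args string containing zlog=kalloc.512 zrecs=8000 (zone name and record count chosen purely for illustration) would log outstanding allocations in that zone; because spaces cannot appear inside a boot-arg value, a period in the log name stands in for a space in the zone name (line 425).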
455 * zinit initializes a new zone. The zone data structures themselves
456 * are stored in a zone, which is initially a static structure that
464 const char *name) /* a name for the zone */
469 if (zget_space(sizeof(struct zone), (vm_offset_t *)&z)
548 * Add the zone to the all-zones list.
560 * Check if we should be logging this zone. If so, remember the zone pointer.
568 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
572 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
581 * At this point, everything is set up and we're ready to start logging this zone.
585 printf("zone: logging started for zone %s (%p)\n", zone_of_interest->zone_name, zone_of_interest);
588 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
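
The zinit fragments above (lines 455-464), together with the concrete call at lines 820-821, give the full creation pattern: element size, maximum zone size, growth increment, and a name. A hedged sketch of typical client code (struct widget and all the sizes are invented; the argument order follows the zone_zone call in this file):

    struct widget {
        int  id;
        char payload[60];
    };

    static zone_t widget_zone;

    void widget_zone_bootstrap(void)
    {
        widget_zone = zinit(sizeof(struct widget),        /* element size */
                            1024 * sizeof(struct widget), /* max memory for the zone */
                            16 * sizeof(struct widget),   /* grow in chunks this big */
                            "widgets");                   /* name shown by zprint/zlog */
    }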
597 * Cram the given memory into the specified zone.
601 register zone_t zone,
609 assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
610 assert(!zone->collectable || zone->allows_foreign
613 elem_size = zone->elem_size;
615 lock_zone(zone);
617 ADD_TO_ZONE(zone, newmem);
620 zone->count++; /* compensate for ADD_TO_ZONE */
623 zone->cur_size += elem_size;
625 unlock_zone(zone);
714 * Steal memory for the zone package. Called from
720 zdata_size = round_page(128*sizeof(struct zone));
726 * Fill a zone with enough memory to contain at least nelem elements.
728 * Return the number of elements actually put into the zone, which may
734 zone_t zone,
745 size = nelem * zone->elem_size;
751 zone_change(zone, Z_FOREIGN, TRUE);
752 zcram(zone, (void *)memory, size);
753 nalloc = size / zone->elem_size;
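
zfill (lines 726-753) shows the pattern for pre-populating a zone: compute the size needed for nelem elements, obtain backing memory, flag the zone as accepting foreign pages, and zcram the block in. A re-sketch of that flow with the memory source left abstract (my_get_wired_pages is a hypothetical helper; the real code obtains wired kernel memory):

    int my_zfill(zone_t zone, int nelem)
    {
        vm_offset_t memory;
        vm_size_t   size = nelem * zone->elem_size;   /* line 745 */

        size = round_page(size);  /* assumed page-rounding, mirroring round_page() at line 720 */
        if (my_get_wired_pages(&memory, size) != KERN_SUCCESS)
            return 0;                                 /* nothing added */

        zone_change(zone, Z_FOREIGN, TRUE);           /* line 751: pages live outside the zone map */
        zcram(zone, (void *)memory, size);            /* line 752: carve the block into free elements */
        return size / zone->elem_size;                /* line 753: elements actually added */
    }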
760 * Initialize the "zone of zones" which uses fixed memory allocated
771 /* see if we want freed zone element checking and/or poisoning */
781 * Check for and set up zone leak detection if requested via boot-args. We recognize two
787 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
820 zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
821 sizeof(struct zone), "zones");
861 * zalloc returns an element from the specified zone.
865 register zone_t zone,
870 void *bt[MAX_DEPTH]; /* only used if zone logging is enabled */
874 assert(zone != ZONE_NULL);
877 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
880 if (DO_LOGGING(zone))
883 lock_zone(zone);
885 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
887 while ((addr == 0) && canblock && (zone->doing_gc)) {
888 zone->waiting = TRUE;
889 zone_sleep(zone);
890 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
897 if (zone->doing_alloc) {
899 * Someone is allocating memory for this zone.
902 zone->waiting = TRUE;
903 zone_sleep(zone);
906 if ((zone->cur_size + zone->elem_size) >
907 zone->max_size) {
908 if (zone->exhaustible)
910 if (zone->expandable) {
921 zone->max_size += (zone->max_size >> 1);
923 unlock_zone(zone);
925 panic("zalloc: zone \"%s\" empty.", zone->zone_name);
928 zone->doing_alloc = TRUE;
929 unlock_zone(zone);
931 if (zone->collectable) {
940 round_page(zone->elem_size);
942 alloc_size = zone->alloc_size;
954 zcram(zone, (void *)space, alloc_size);
966 panic("zalloc: \"%s\" (%d elements) retry fail %d", zone->zone_name, zone->count, retval);
972 lock_zone(zone);
973 zone->doing_alloc = FALSE;
974 if (zone->waiting) {
975 zone->waiting = FALSE;
976 zone_wakeup(zone);
978 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
981 unlock_zone(zone);
984 lock_zone(zone);
988 retval = zget_space(zone->elem_size, &space);
990 lock_zone(zone);
991 zone->doing_alloc = FALSE;
992 if (zone->waiting) {
993 zone->waiting = FALSE;
994 thread_wakeup((event_t)zone);
997 zone->count++;
998 zone->cur_size += zone->elem_size;
1000 if (zone_debug_enabled(zone)) {
1001 enqueue_tail(&zone->active_zones, (queue_entry_t)space);
1004 unlock_zone(zone);
1005 zone_page_alloc(space, zone->elem_size);
1007 if (zone_debug_enabled(zone))
1014 unlock_zone(zone);
1017 lock_zone(zone);
1019 panic("zalloc: \"%s\" (%d elements) zget_space returned %d", zone->zone_name, zone->count, retval);
1024 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
1028 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
1029 * suspected, so this code rarely executes. We need to do this code while still holding the zone lock
1033 if (DO_LOGGING(zone) && addr) {
1037 * depending on whether we're looking for the source of a zone leak or a zone corruption. When looking
1041 * the allocations and frees done in the zone so that the history of operations for a specific zone
1097 if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
1098 zone->async_pending = TRUE;
1099 unlock_zone(zone);
1100 thread_call_enter(&zone->call_async_alloc);
1101 lock_zone(zone);
1102 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
1106 if (addr && zone_debug_enabled(zone)) {
1107 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
1112 unlock_zone(zone);
1115 TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
1123 register zone_t zone)
1125 return( zalloc_canblock(zone, TRUE) );
1130 register zone_t zone)
1132 return( zalloc_canblock(zone, FALSE) );
1151 * zget returns an element from the specified zone
1159 register zone_t zone)
1163 assert( zone != ZONE_NULL );
1165 if (!lock_try_zone(zone))
1168 REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
1170 if (addr && zone_debug_enabled(zone)) {
1171 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
1175 unlock_zone(zone);
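
zget (lines 1151-1175) is the try-lock, non-blocking counterpart to zalloc: it returns only what is already on the free list and gives up immediately if the zone lock is contended, which suits contexts that must not sleep. A brief hedged usage sketch, continuing the hypothetical widget_zone from above:

    void *e = zget(widget_zone);     /* never blocks, never grows the zone */
    if (e == NULL) {
        /* lock contended or free list empty; defer the work or fail softly */
    }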
1189 register zone_t zone,
1193 void *bt[MAX_DEPTH]; /* only used if zone logging is enabled via boot-args */
1196 assert(zone != ZONE_NULL);
1199 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
1202 if (DO_LOGGING(zone))
1207 if (zone == ZONE_NULL || elem == (vm_offset_t)0)
1210 if (zone == zone_zone)
1214 TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (int)addr);
1216 if (zone->collectable && !zone->allows_foreign &&
1217 !from_zone_map(elem, zone->elem_size)) {
1219 panic("zfree: non-allocated memory in collectable zone!");
1221 zone_last_bogus_zone = zone;
1226 lock_zone(zone);
1229 * See if we're doing logging on this zone. There are two styles of logging used depending on
1233 if (DO_LOGGING(zone)) {
1283 if (zone_debug_enabled(zone)) {
1288 /* check the zone's consistency */
1290 for (tmp_elem = queue_first(&zone->active_zones);
1291 !queue_end(tmp_elem, &zone->active_zones);
1296 panic("zfree()ing element from wrong zone");
1298 remqueue(&zone->active_zones, (queue_t) elem);
1304 /* check the zone's consistency */
1306 for (this = zone->free_elements;
1312 ADD_TO_ZONE(zone, elem);
1316 * request to run the garbage collection in the zone the next
1319 if (zone->elem_size >= PAGE_SIZE &&
1323 unlock_zone(zone);
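
With zfree's checks in view, the allocation and free paths pair up like any object cache, with the caveat that an element must go back to the zone it came from (the consistency walk at lines 1288-1296 panics otherwise). A short sketch, again using the hypothetical widget_zone:

    struct widget *w = (struct widget *)zalloc(widget_zone); /* may block while the zone grows */
    if (w != NULL) {
        w->id = 42;
        /* ... use the element ... */
        zfree(widget_zone, w);  /* must match the zone it was allocated from */
    }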
1327 /* Change a zone's flags.
1332 zone_t zone,
1336 assert( zone != ZONE_NULL );
1341 zone->exhaustible = value;
1344 zone->collectable = value;
1347 zone->expandable = value;
1350 zone->allows_foreign = value;
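
The four zone_change cases above map one-for-one onto flag constants: Z_FOREIGN is used at line 751, and its companions Z_EXHAUST, Z_COLLECT, and Z_EXPAND (declared alongside it in zalloc.h) drive the exhaustible, collectable, and expandable bits respectively. A brief hedged example against the hypothetical widget_zone:

    zone_change(widget_zone, Z_EXHAUST, TRUE);  /* let zalloc return NULL at max_size instead of panicking */
    zone_change(widget_zone, Z_COLLECT, FALSE); /* keep the garbage collector away from this zone */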
1361 * Return the expected number of free elements in the zone.
1364 * into a zone is by zcram.
1368 zone_free_count(zone_t zone)
1372 lock_zone(zone);
1373 free_count = zone->cur_size/zone->elem_size - zone->count;
1374 unlock_zone(zone);
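
As a worked example of the formula at line 1373 (hypothetical numbers, assuming 4 KB pages): a zone whose cur_size is 8 pages of 64-byte elements with count = 400 elements outstanding reports 8 * 4096 / 64 - 400 = 112 free elements. Per the comment at lines 1361-1364, the figure is reliable because zcram is the only way memory enters a zone.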
1383 * zone to the specified size
1387 zone_t zone,
1396 zcram(zone, (void *)addr, size);
1567 /* This is used for walking through a zone's free element list.
1574 * Add a linked list of pages starting at base back into the zone
1578 #define ADD_LIST_TO_ZONE(zone, base, tail) \
1580 (tail)->next = (void *)((zone)->free_elements); \
1582 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
1583 ((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
1584 (zone)->free_elements; \
1586 (zone)->free_elements = (unsigned long)(base); \
1593 #define ADD_ELEMENT(zone, prev, elem) \
1597 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
1598 ((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
1655 * Do a quick feasibility check before we scan the zone:
1659 * the zone has more than 10 percent of its elements free
1672 * Snatch all of the free elements away from the zone.
1685 * are returned to the zone.
1800 * and update the zone size info.
1826 * the zone size info.
1876 * By default, don't attempt zone GC more frequently
1995 struct zone zcopy;
2090 zone_t zone);
2092 zone_t zone);
2095 zone_t zone);
2100 struct zone zcopy;
2122 struct zone *z = (zone_t)((char *)0 + addr);
2174 zone_t zone)
2179 if (!zone_debug_enabled(zone) || !zone_check)
2181 tmp_elem = queue_first(&zone->active_zones);
2182 while (count < zone->count) {
2185 printf("unexpected zero element, zone=%p, count=%d\n",
2186 zone, count);
2190 if (queue_end(tmp_elem, &zone->active_zones)) {
2191 printf("unexpected queue_end, zone=%p, count=%d\n",
2192 zone, count);
2198 if (!queue_end(tmp_elem, &zone->active_zones)) {
2199 printf("not at queue_end, zone=%p, tmp_elem=%p\n",
2200 zone, tmp_elem);
2207 zone_t zone)
2212 if (!zone_debug_enabled(zone)) {
2213 printf("zone %p debug not enabled\n", zone);
2221 printf("zone %p, active elements %d\n", zone, zone->count);
2223 tmp_elem = queue_first(&zone->active_zones);
2224 while (count < zone->count) {
2233 if (queue_end(tmp_elem, &zone->active_zones)) {
2239 if (!queue_end(tmp_elem, &zone->active_zones))
2248 zone_t zone)
2254 freecount = zone_free_count(zone);
2255 printf("zone %p, free elements %d\n", zone, freecount);
2257 elem = zone->free_elements;
2316 * Second arg controls how many zone elements are printed: