/*
 * Copyright (c) 1999, 2000, 2003, 2005, 2008, 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include "magmallocProvider.h"

#include <_simple.h>
#include <crt_externs.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>
#include <mach/mach_vm.h>
#include <mach/mach_init.h>
#include <mach/thread_switch.h>
#include <mach/vm_map.h>
#include <mach-o/dyld.h>
#include <os/tsd.h>
#include <sys/mman.h>
#include <xlocale.h>
#include <TargetConditionals.h>

#include "malloc.h"
#include "malloc_printf.h"
#include "scalable_malloc.h"
#include "malloc_internal.h"
#include "stack_logging.h"

#if TARGET_OS_EMBEDDED || TARGET_IPHONE_SIMULATOR
// _malloc_printf(ASL_LEVEL_INFO...) on iOS doesn't show up in the Xcode Console log of the device,
// but ASL_LEVEL_NOTICE does.  So raising the log level is helpful.
#undef ASL_LEVEL_INFO
#define ASL_LEVEL_INFO ASL_LEVEL_NOTICE
#endif

#include <CrashReporterClient.h>

#ifdef __LP64__
#define CONFIG_NANOZONE 1
#else
#define CONFIG_NANOZONE 0
#endif

/*
 * MALLOC_ABSOLUTE_MAX_SIZE - There are many instances of addition to a
 * user-specified size_t, which can cause overflow (and subsequent crashes)
 * for values near SIZE_T_MAX.  Rather than add extra "if" checks everywhere
 * this occurs, it is easier to just set an absolute maximum request size,
 * and immediately return an error if the requested size exceeds this maximum.
 * Of course, values less than this absolute max can fail later if the value
 * is still too large for the available memory.  The largest value added
 * seems to be PAGE_SIZE (in the macro round_page()), so to be safe, we set
 * the maximum to be 2 * PAGE_SIZE less than SIZE_T_MAX.
 */
#define MALLOC_ABSOLUTE_MAX_SIZE	(SIZE_T_MAX - (2 * PAGE_SIZE))
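
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * without the absolute cap, a request near SIZE_T_MAX would overflow when
 * rounded up to a page boundary. A generic page-rounding expression shows
 * the failure mode:
 *
 *	size_t req = SIZE_T_MAX - 100;
 *	size_t rounded = (req + PAGE_SIZE - 1) & ~((size_t)PAGE_SIZE - 1);
 *	// rounded wraps to a tiny value, so a later bounds check would pass
 *	// even though the allocation is impossibly large.
 *
 * Rejecting anything above MALLOC_ABSOLUTE_MAX_SIZE up front makes every
 * subsequent "size + constant" computation safe.
 */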

#define USE_SLEEP_RATHER_THAN_ABORT	0

typedef void (malloc_logger_t)(uint32_t type, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t result, uint32_t num_hot_frames_to_skip);

extern malloc_logger_t *__syscall_logger;	// use this to set up syscall logging (e.g., vm_allocate, vm_deallocate, mmap, munmap)

extern void __prepare_to_log_stacks(void);

static _malloc_lock_s _malloc_lock = _MALLOC_LOCK_INIT;
#define MALLOC_LOCK()		_malloc_lock_lock(&_malloc_lock)
#define MALLOC_UNLOCK()		_malloc_lock_unlock(&_malloc_lock)

static inline void yield(void) {
	thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 1);
}

/* The following variables are exported for the benefit of performance tools
 *
 * It should always be safe to first read malloc_num_zones, then read
 * malloc_zones without taking the lock, if only iteration is required and
 * provided that when malloc_destroy_zone is called all prior operations on that
 * zone are complete and no further calls referencing that zone can be made.
 */
unsigned malloc_num_zones = 0;
unsigned malloc_num_zones_allocated = 0;
malloc_zone_t **malloc_zones = 0;
malloc_logger_t *malloc_logger = NULL;
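
/*
 * Illustrative sketch (editor's addition): a performance tool iterating the
 * zone list in-process, relying on the ordering guarantee described above.
 * Reading malloc_num_zones first bounds the walk over malloc_zones:
 *
 *	unsigned n = malloc_num_zones;	// snapshot the count first
 *	for (unsigned i = 0; i < n; i++) {
 *		malloc_zone_t *z = malloc_zones[i];
 *		// inspect z (e.g., z->zone_name), but never free or destroy it here
 *	}
 */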

unsigned malloc_debug_flags = 0;

unsigned malloc_check_start = 0; // 0 means don't check
unsigned malloc_check_counter = 0;
unsigned malloc_check_each = 1000;

/* global flag to suppress ASL logging e.g. for syslogd */
int _malloc_no_asl_log = 0;

static int malloc_check_sleep = 100; // default 100 second sleep
static int malloc_check_abort = 0; // default is to sleep, not abort

static int malloc_debug_file = STDERR_FILENO;
static boolean_t _malloc_is_initialized = FALSE;

static const char Malloc_Facility[] = "com.apple.Libsystem.malloc";

/*
 * Counters that coordinate zone destruction (in malloc_zone_unregister) with
 * find_registered_zone (here abbreviated as FRZ).
 */
static int counterAlice = 0, counterBob = 0;
static int *pFRZCounterLive = &counterAlice, *pFRZCounterDrain = &counterBob;
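
/*
 * Illustrative sketch (editor's addition): the hand-off protocol these two
 * counters implement. Readers increment whichever counter pFRZCounterLive
 * points at while they walk the zone list; the unregistering thread swaps
 * the counters' roles and waits for the old one to drain:
 *
 *	// reader (find_registered_zone)
 *	int *c = pFRZCounterLive;
 *	__sync_fetch_and_add(c, 1);
 *	// ... walk malloc_zones ...
 *	__sync_fetch_and_sub(c, 1);
 *
 *	// writer (malloc_zone_unregister), after shrinking the list
 *	swap(pFRZCounterLive, pFRZCounterDrain);	// pseudocode for the pointer swap
 *	while (*pFRZCounterDrain != 0) yield();		// let old readers finish
 */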

#define MALLOC_LOG_TYPE_ALLOCATE	stack_logging_type_alloc
#define MALLOC_LOG_TYPE_DEALLOCATE	stack_logging_type_dealloc
#define MALLOC_LOG_TYPE_HAS_ZONE	stack_logging_flag_zone
#define MALLOC_LOG_TYPE_CLEARED		stack_logging_flag_cleared

#define DEFAULT_MALLOC_ZONE_STRING "DefaultMallocZone"
#define DEFAULT_PURGEABLE_ZONE_STRING "DefaultPurgeableMallocZone"

boolean_t malloc_engaged_nano(void);
#if CONFIG_NANOZONE
extern boolean_t _malloc_engaged_nano;
malloc_zone_t *create_nano_zone(size_t initial_size, malloc_zone_t *helper_zone, unsigned debug_flags);
void nano_forked_zone(malloc_zone_t *zone);
#define MALLOC_HELPER_ZONE_STRING "MallocHelperZone"
#endif

/*********	Utilities	************/
__attribute__((visibility("hidden"))) uint64_t malloc_entropy[2] = {0, 0};

void __malloc_init(const char *apple[]);

static int
__entropy_from_kernel(const char *str)
{
	unsigned long long val;
	char tmp[20], *p;
	int idx = 0;

	/* Skip over the key to the first value */
	str = strchr(str, '=');
	if (str == NULL)
		return 0;
	str++;

	while (str && idx < sizeof(malloc_entropy)/sizeof(malloc_entropy[0])) {
		strlcpy(tmp, str, sizeof(tmp));
		p = strchr(tmp, ',');
		if (p) *p = '\0';
		val = strtoull_l(tmp, NULL, 0, NULL);
		malloc_entropy[idx] = (uint64_t)val;
		idx++;
		if ((str = strchr(str, ',')) != NULL)
			str++;
	}
	return idx;
}
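
/*
 * Illustrative sketch (editor's addition): the apple[] entry this parser
 * expects. The kernel passes two comma-separated values after the key, and
 * __entropy_from_kernel() fills malloc_entropy[] from them. The sample
 * values below are hypothetical; only the "key=v1,v2" shape is implied by
 * the code above:
 *
 *	const char *arg = "malloc_entropy=0x1122334455667788,0x99aabbccddeeff00";
 *	int n = __entropy_from_kernel(arg);
 *	// n == 2; malloc_entropy[0] == 0x1122334455667788ULL, etc.
 */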

/* TODO: Remove __malloc_entropy_setup, it was left to avoid rev-lock */
void
__malloc_entropy_setup(const char *apple[]) {
	__malloc_init(apple);
}

/* TODO: Investigate adding _malloc_initialize() into this libSystem initializer */
void
__malloc_init(const char *apple[])
{
	const char **p;
#if CONFIG_NANOZONE
	_malloc_engaged_nano = 0;
	for (p = apple; p && *p; p++) {
		if (0 == strncmp(*p, "MallocNanoZone=1", strlen("MallocNanoZone=1"))) {
			// _malloc_printf(ASL_LEVEL_INFO, "MallocNanoZone=1\n");
			_malloc_engaged_nano = 1;
			break;
		}
	}
#endif

	for (p = apple; p && *p; p++) {
		if (strstr(*p, "malloc_entropy") == *p) {
			int count = __entropy_from_kernel(*p);
			bzero((void*)*p, strlen(*p));

			if (sizeof(malloc_entropy)/sizeof(malloc_entropy[0]) == count) {
				return;
			}
			break;
		}
	}

	malloc_entropy[0] = ((uint64_t)arc4random()) << 32 | ((uint64_t)arc4random());
	malloc_entropy[1] = ((uint64_t)arc4random()) << 32 | ((uint64_t)arc4random());
	return;
}

static inline malloc_zone_t * find_registered_zone(const void *, size_t *) __attribute__((always_inline));
static inline malloc_zone_t *
find_registered_zone(const void *ptr, size_t *returned_size) {
	// Returns a zone which contains ptr, else NULL

	if (0 == malloc_num_zones) {
		if (returned_size) *returned_size = 0;
		return NULL;
	}

	// The default zone is registered in malloc_zones[0]. There's no danger that it will ever be unregistered.
	// So don't advance the FRZ counter yet.
	malloc_zone_t *zone = malloc_zones[0];
	size_t size = zone->size(zone, ptr);
	if (size) { // Claimed by this zone?
		if (returned_size) *returned_size = size;
		return zone;
	}

	int *pFRZCounter = pFRZCounterLive; // Capture pointer to the counter of the moment
	__sync_fetch_and_add(pFRZCounter, 1); // Advance this counter -- our thread is in FRZ

	unsigned		index;
	unsigned		limit = malloc_num_zones;
	malloc_zone_t	**zones = &malloc_zones[1];

	for (index = 1; index < limit; ++index, ++zones) {
		zone = *zones;
		size = zone->size(zone, ptr);
		if (size) { // Claimed by this zone?
			if (returned_size) *returned_size = size;
			__sync_fetch_and_sub(pFRZCounter, 1); // our thread is leaving FRZ
			return zone;
		}
	}
	// Unclaimed by any zone.
	if (returned_size) *returned_size = 0;
	__sync_fetch_and_sub(pFRZCounter, 1); // our thread is leaving FRZ
	return NULL;
}

__attribute__((visibility("hidden"))) __attribute__((noinline)) void
malloc_error_break(void) {
	// Provides a non-inlined place for various malloc error procedures to call
	// that will be called after an error message appears.  It does not make
	// sense for developers to call this function, so it is marked
	// hidden to prevent it from becoming API.
	MAGMALLOC_MALLOCERRORBREAK(); // DTrace USDT probe
}

extern boolean_t __stack_logging_locked();

__attribute__((visibility("hidden"))) __attribute__((noinline)) __attribute__((used)) int
malloc_gdb_po_unsafe(void) {
	// In order to implement "po" and other data formatters in gdb, the debugger
	// calls functions that call malloc.  The debugger will only run one thread
	// of the program in this case, so if another thread is holding a zone lock,
	// gdb may deadlock.
	//
	// Iterate over the zones in malloc_zones, and call "trylock" on the zone
	// lock.  If trylock succeeds, unlock it, otherwise return "locked".  Returns
	// 0 == safe, 1 == locked/unsafe.

	if (__stack_logging_locked())
		return 1;

	malloc_zone_t **zones = malloc_zones;
	unsigned i, e = malloc_num_zones;

	for (i = 0; i != e; ++i) {
		malloc_zone_t *zone = zones[i];

		// Version must be >= 5 to look at the new introspection field.
		if (zone->version < 5)
			continue;

		if (zone->introspect->zone_locked && zone->introspect->zone_locked(zone))
			return 1;
	}
	return 0;
}
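
/*
 * Illustrative sketch (editor's addition): how a debugger might consult this
 * hook before evaluating an expression that allocates. The call site is a
 * hypothetical stand-in for gdb's actual expression-evaluation machinery:
 *
 *	if (malloc_gdb_po_unsafe()) {
 *		// some zone (or the stack log) is locked by a suspended thread;
 *		// calling malloc now could deadlock, so refuse to run "po".
 *	}
 */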

/*********	Creation and destruction	************/

static void set_flags_from_environment(void);

static void
malloc_zone_register_while_locked(malloc_zone_t *zone) {
	size_t protect_size;
	unsigned i;

	/* scan the list of zones to see if this zone is already registered.  If
	 * so, print an error message and return. */
	for (i = 0; i != malloc_num_zones; ++i)
		if (zone == malloc_zones[i]) {
			_malloc_printf(ASL_LEVEL_ERR, "Attempted to register zone more than once: %p\n", zone);
			return;
		}

	if (malloc_num_zones == malloc_num_zones_allocated) {
		size_t malloc_zones_size = malloc_num_zones * sizeof(malloc_zone_t *);
		mach_vm_size_t alloc_size = round_page(malloc_zones_size + vm_page_size);
		mach_vm_address_t vm_addr;
		int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_MALLOC);

		vm_addr = vm_page_size;
		kern_return_t kr = mach_vm_allocate(mach_task_self(), &vm_addr, alloc_size, alloc_flags);
		if (kr) {
			_malloc_printf(ASL_LEVEL_ERR, "malloc_zone_register allocation failed: %d\n", kr);
			return;
		}

		malloc_zone_t **new_zones = (malloc_zone_t **)(uintptr_t)vm_addr;
		/* If there were previously allocated malloc zones, we need to copy them
		 * out of the previous array and into the new zones array */
		if (malloc_zones) {
			memcpy(new_zones, malloc_zones, malloc_zones_size);
			vm_addr = (mach_vm_address_t)(uintptr_t)malloc_zones;
			mach_vm_size_t dealloc_size = round_page(malloc_zones_size);
			mach_vm_deallocate(mach_task_self(), vm_addr, dealloc_size);
		}

		/* Update the malloc_zones pointer and the number of zones allocated */
		protect_size = alloc_size;
		malloc_zones = new_zones;
		malloc_num_zones_allocated = alloc_size / sizeof(malloc_zone_t *);
	} else {
		/* If we don't need to reallocate zones, we need to briefly change the
		 * page protection of the malloc zones array to allow writes */
		protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
		mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
	}
	malloc_zones[malloc_num_zones++] = zone;

	/* Finally, now that the zone is registered, disallow write access to the
	 * malloc_zones array */
	mprotect(malloc_zones, protect_size, PROT_READ);
	//_malloc_printf(ASL_LEVEL_INFO, "Registered malloc_zone %p in malloc_zones %p [%u zones, %u bytes]\n", zone, malloc_zones, malloc_num_zones, protect_size);
}

static void
_malloc_initialize(void) {
	MALLOC_LOCK();
	if (!_malloc_is_initialized) {
		unsigned n;
		malloc_zone_t	*zone;

		_malloc_is_initialized = TRUE;

		set_flags_from_environment(); // will only set flags up to two times
		n = malloc_num_zones;

#if CONFIG_NANOZONE
		malloc_zone_t *helper_zone = create_scalable_zone(0, malloc_debug_flags);
		zone = create_nano_zone(0, helper_zone, malloc_debug_flags);

		if (zone) {
			malloc_zone_register_while_locked(zone);
			malloc_zone_register_while_locked(helper_zone);

			// Must call malloc_set_zone_name() *after* helper and nano are hooked together.
			malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);
			malloc_set_zone_name(helper_zone, MALLOC_HELPER_ZONE_STRING);
		} else {
			zone = helper_zone;
			malloc_zone_register_while_locked(zone);
			malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);
		}
#else
		zone = create_scalable_zone(0, malloc_debug_flags);
		malloc_zone_register_while_locked(zone);
		malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);
#endif

		if (n != 0) { // make the default first, for efficiency
			unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
			malloc_zone_t *hold = malloc_zones[0];

			if (hold->zone_name && strcmp(hold->zone_name, DEFAULT_MALLOC_ZONE_STRING) == 0) {
				malloc_set_zone_name(hold, NULL);
			}

			mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
			malloc_zones[0] = malloc_zones[n];
			malloc_zones[n] = hold;
			mprotect(malloc_zones, protect_size, PROT_READ);
		}
		// _malloc_printf(ASL_LEVEL_INFO, "%d registered zones\n", malloc_num_zones);
		// _malloc_printf(ASL_LEVEL_INFO, "malloc_zones is at %p; malloc_num_zones is at %p\n", (unsigned)&malloc_zones, (unsigned)&malloc_num_zones);
	}
	MALLOC_UNLOCK();
}

static inline malloc_zone_t *inline_malloc_default_zone(void) __attribute__((always_inline));
static inline malloc_zone_t *
inline_malloc_default_zone(void) {
	if (!_malloc_is_initialized) _malloc_initialize();
	// _malloc_printf(ASL_LEVEL_INFO, "In inline_malloc_default_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);
	return malloc_zones[0];
}

malloc_zone_t *
malloc_default_zone(void) {
	return inline_malloc_default_zone();
}

static inline malloc_zone_t *inline_malloc_default_scalable_zone(void) __attribute__((always_inline));
static inline malloc_zone_t *
inline_malloc_default_scalable_zone(void) {
	unsigned index;

	if (!_malloc_is_initialized) _malloc_initialize();
	// _malloc_printf(ASL_LEVEL_INFO, "In inline_malloc_default_scalable_zone with %d %d\n", malloc_num_zones, malloc_has_debug_zone);

	MALLOC_LOCK();
#if CONFIG_NANOZONE
	for (index = 0; index < malloc_num_zones; ++index) {
		malloc_zone_t *z = malloc_zones[index];

		if (z->zone_name && strcmp(z->zone_name, MALLOC_HELPER_ZONE_STRING) == 0) {
			MALLOC_UNLOCK();
			return z;
		}
	}
#endif
	for (index = 0; index < malloc_num_zones; ++index) {
		malloc_zone_t *z = malloc_zones[index];

		if (z->zone_name && strcmp(z->zone_name, DEFAULT_MALLOC_ZONE_STRING) == 0) {
			MALLOC_UNLOCK();
			return z;
		}
	}
	MALLOC_UNLOCK();

	malloc_printf("*** malloc_default_scalable_zone() failed to find 'DefaultMallocZone'\n");
	return NULL; // FIXME: abort() instead?
}

/*
 * malloc_engaged_nano() is for the benefit of libdispatch, which calls here just once.
 */
boolean_t malloc_engaged_nano(void)
{
#if CONFIG_NANOZONE
	return _malloc_engaged_nano;
#else
	return 0;
#endif
}

malloc_zone_t *
malloc_default_purgeable_zone(void) {
	static malloc_zone_t *dpz;

	if (!dpz) {
		//
		// PR_7288598: Must pass a *scalable* zone (szone) as the helper for create_purgeable_zone().
		// Take care that the zone so obtained is not subject to interposing.
		//
		malloc_zone_t *tmp = create_purgeable_zone(0, inline_malloc_default_scalable_zone(), malloc_debug_flags);
		malloc_zone_register(tmp);
		malloc_set_zone_name(tmp, DEFAULT_PURGEABLE_ZONE_STRING);
		if (!__sync_bool_compare_and_swap(&dpz, NULL, tmp))
			malloc_destroy_zone(tmp);
	}
	return dpz;
}
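
/*
 * Illustrative sketch (editor's addition): callers obtain the singleton and
 * allocate page-multiples from it; smaller blocks would be rejected by the
 * purge-state helpers further below:
 *
 *	malloc_zone_t *pz = malloc_default_purgeable_zone();
 *	void *buf = malloc_zone_malloc(pz, 4 * vm_page_size);
 */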

static void
set_flags_from_environment(void) {
	const char	*flag;
	int		fd;
	char	**env = *_NSGetEnviron();
	char	**p;
	char	*c;

	if (malloc_debug_file != STDERR_FILENO) {
		close(malloc_debug_file);
		malloc_debug_file = STDERR_FILENO;
	}
#if __LP64__
	malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION; // Set always on 64-bit processes
#else
	int libSystemVersion = NSVersionOfLinkTimeLibrary("System");
	if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 126) /* linked pre-Lion */)
		malloc_debug_flags = 0;
	else
		malloc_debug_flags = SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
#endif
	stack_logging_enable_logging = 0;
	stack_logging_dontcompact = 0;
	malloc_logger = NULL;
	malloc_check_start = 0;
	malloc_check_each = 1000;
	malloc_check_abort = 0;
	malloc_check_sleep = 100;
	/*
	 * Given that all of our environment variables start with "Malloc", we optimize by first
	 * scanning the environment quickly, thereby avoiding repeated calls to getenv().
	 * If we are setuid/setgid these flags are ignored to prevent a malicious invoker from
	 * changing our behaviour.
	 */
	for (p = env; (c = *p) != NULL; ++p) {
		if (!strncmp(c, "Malloc", 6)) {
			if (issetugid())
				return;
			break;
		}
	}
	if (c == NULL)
		return;
	flag = getenv("MallocLogFile");
	if (flag) {
		fd = open(flag, O_WRONLY|O_APPEND|O_CREAT, 0644);
		if (fd >= 0) {
			malloc_debug_file = fd;
			fcntl(fd, F_SETFD, 0); // clear close-on-exec flag  XXX why?
		} else {
			malloc_printf("Could not open %s, using stderr\n", flag);
		}
	}
	if (getenv("MallocGuardEdges")) {
		malloc_debug_flags |= SCALABLE_MALLOC_ADD_GUARD_PAGES;
		_malloc_printf(ASL_LEVEL_INFO, "protecting edges\n");
		if (getenv("MallocDoNotProtectPrelude")) {
			malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_PRELUDE;
			_malloc_printf(ASL_LEVEL_INFO, "... but not protecting prelude guard page\n");
		}
		if (getenv("MallocDoNotProtectPostlude")) {
			malloc_debug_flags |= SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE;
			_malloc_printf(ASL_LEVEL_INFO, "... but not protecting postlude guard page\n");
		}
	}
	flag = getenv("MallocStackLogging");
	if (!flag) {
		flag = getenv("MallocStackLoggingNoCompact");
		stack_logging_dontcompact = 1;
	}
	if (flag) {
		// Set up stack logging as early as possible to catch all ensuing VM allocations,
		// including those from _malloc_printf and malloc zone setup.  Make sure to set
		// __syscall_logger after this, because __prepare_to_log_stacks() itself makes VM
		// allocations that we aren't prepared to log yet.
		__prepare_to_log_stacks();
		if (strcmp(flag, "malloc") == 0) {
			malloc_logger = __disk_stack_logging_log_stack;
			_malloc_printf(ASL_LEVEL_INFO, "recording malloc (but not VM allocation) stacks to disk using standard recorder\n");
		} else if (strcmp(flag, "vm") == 0) {
			__syscall_logger = __disk_stack_logging_log_stack;
			_malloc_printf(ASL_LEVEL_INFO, "recording VM allocation (but not malloc) stacks to disk using standard recorder\n");
		} else {
			malloc_logger = __disk_stack_logging_log_stack;
			__syscall_logger = __disk_stack_logging_log_stack;
			_malloc_printf(ASL_LEVEL_INFO, "recording malloc and VM allocation stacks to disk using standard recorder\n");
		}
		stack_logging_enable_logging = 1;
		if (stack_logging_dontcompact) {
			if (malloc_logger == __disk_stack_logging_log_stack) {
				_malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; size of log files on disk can increase rapidly\n");
			} else {
				_malloc_printf(ASL_LEVEL_INFO, "stack logging compaction turned off; VM can increase rapidly\n");
			}
		}
	}
	if (getenv("MallocScribble")) {
		malloc_debug_flags |= SCALABLE_MALLOC_DO_SCRIBBLE;
		_malloc_printf(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n");
	}
	if (getenv("MallocErrorAbort")) {
		malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_ERROR;
		_malloc_printf(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n");
	}
#if CONFIG_NANOZONE
	/* Explicit overrides from the environment */
	if ((flag = getenv("MallocNanoZone"))) {
		if (flag[0] == '1') {
			_malloc_engaged_nano = 1;
		} else if (flag[0] == '0') {
			_malloc_engaged_nano = 0;
		}
	}
#endif /* CONFIG_NANOZONE */

#if __LP64__
	/* initialization above forces SCALABLE_MALLOC_ABORT_ON_CORRUPTION in 64-bit processes */
#else
	flag = getenv("MallocCorruptionAbort");
	if (flag && (flag[0] == '0')) { // Set from an environment variable in 32-bit processes
		malloc_debug_flags &= ~SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
	} else if (flag) {
		malloc_debug_flags |= SCALABLE_MALLOC_ABORT_ON_CORRUPTION;
	}
#endif
	flag = getenv("MallocCheckHeapStart");
	if (flag) {
		malloc_check_start = strtoul(flag, NULL, 0);
		if (malloc_check_start == 0) malloc_check_start = 1;
		if (malloc_check_start == -1) malloc_check_start = 1;
		flag = getenv("MallocCheckHeapEach");
		if (flag) {
			malloc_check_each = strtoul(flag, NULL, 0);
			if (malloc_check_each == 0) malloc_check_each = 1;
			if (malloc_check_each == -1) malloc_check_each = 1;
		}
		_malloc_printf(ASL_LEVEL_INFO, "checks heap after %dth operation and each %d operations\n", malloc_check_start, malloc_check_each);
		flag = getenv("MallocCheckHeapAbort");
		if (flag)
			malloc_check_abort = strtol(flag, NULL, 0);
		if (malloc_check_abort)
			_malloc_printf(ASL_LEVEL_INFO, "will abort on heap corruption\n");
		else {
			flag = getenv("MallocCheckHeapSleep");
			if (flag)
				malloc_check_sleep = strtol(flag, NULL, 0);
			if (malloc_check_sleep > 0)
				_malloc_printf(ASL_LEVEL_INFO, "will sleep for %d seconds on heap corruption\n", malloc_check_sleep);
			else if (malloc_check_sleep < 0)
				_malloc_printf(ASL_LEVEL_INFO, "will sleep once for %d seconds on heap corruption\n", -malloc_check_sleep);
			else
				_malloc_printf(ASL_LEVEL_INFO, "no sleep on heap corruption\n");
		}
	}
	if (getenv("MallocHelp")) {
		_malloc_printf(ASL_LEVEL_INFO,
					   "environment variables that can be set for debug:\n"
					   "- MallocLogFile <f> to create/append messages to file <f> instead of stderr\n"
					   "- MallocGuardEdges to add 2 guard pages for each large block\n"
					   "- MallocDoNotProtectPrelude to disable protection (when previous flag set)\n"
					   "- MallocDoNotProtectPostlude to disable protection (when previous flag set)\n"
					   "- MallocStackLogging to record all stacks.  Tools like leaks can then be applied\n"
					   "- MallocStackLoggingNoCompact to record all stacks.  Needed for malloc_history\n"
					   "- MallocStackLoggingDirectory to set location of stack logs, which can grow large; default is /tmp\n"
					   "- MallocScribble to detect writing on free blocks and missing initializers:\n"
					   "  0x55 is written upon free and 0xaa is written on allocation\n"
					   "- MallocCheckHeapStart <n> to start checking the heap after <n> operations\n"
					   "- MallocCheckHeapEach <s> to repeat the checking of the heap after <s> operations\n"
					   "- MallocCheckHeapSleep <t> to sleep <t> seconds on heap corruption\n"
					   "- MallocCheckHeapAbort <b> to abort on heap corruption if <b> is non-zero\n"
					   "- MallocCorruptionAbort to abort on malloc errors, but not on out of memory for 32-bit processes\n"
					   "  MallocCorruptionAbort is always set on 64-bit processes\n"
					   "- MallocErrorAbort to abort on any malloc error, including out of memory\n"
					   "- MallocHelp - this help!\n");
	}
}
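
/*
 * Illustrative sketch (editor's addition): exercising the flags above from a
 * shell, e.g. to chase a use-after-free in a hypothetical ./mytool binary:
 *
 *	$ MallocScribble=1 MallocStackLoggingNoCompact=1 ./mytool
 *	$ MallocCheckHeapStart=1000 MallocCheckHeapEach=100 ./mytool
 *
 * The first run scribbles 0x55 over freed blocks and records stacks for
 * malloc_history; the second begins heap-checking at the 1000th operation.
 */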

malloc_zone_t *
malloc_create_zone(vm_size_t start_size, unsigned flags)
{
	malloc_zone_t	*zone;

	/* start_size doesn't actually appear to be used, but we test anyway. */
	if (start_size > MALLOC_ABSOLUTE_MAX_SIZE) {
		return NULL;
	}
	if (!_malloc_is_initialized) _malloc_initialize();
	zone = create_scalable_zone(start_size, flags | malloc_debug_flags);
	malloc_zone_register(zone);
	return zone;
}

/*
 * For use by CheckFix: establish a new default zone whose behavior is, apart from
 * the use of death-row and per-CPU magazines, that of Leopard.
 */
void
malloc_create_legacy_default_zone(void)
{
	malloc_zone_t	*zone;
	int i;

	if (!_malloc_is_initialized) _malloc_initialize();
	zone = create_legacy_scalable_zone(0, malloc_debug_flags);

	MALLOC_LOCK();
	malloc_zone_register_while_locked(zone);

	//
	// Establish the legacy scalable zone just created as the default zone.
	//
	malloc_zone_t *hold = malloc_zones[0];
	if (hold->zone_name && strcmp(hold->zone_name, DEFAULT_MALLOC_ZONE_STRING) == 0) {
		malloc_set_zone_name(hold, NULL);
	}
	malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);

	unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
	mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);

	// assert(zone == malloc_zones[malloc_num_zones - 1]);
	for (i = malloc_num_zones - 1; i > 0; --i) {
		malloc_zones[i] = malloc_zones[i - 1];
	}
	malloc_zones[0] = zone;

	mprotect(malloc_zones, protect_size, PROT_READ);
	MALLOC_UNLOCK();
}

void
malloc_destroy_zone(malloc_zone_t *zone) {
	malloc_set_zone_name(zone, NULL); // Deallocate zone name wherever it may reside PR_7701095
	malloc_zone_unregister(zone);
	zone->destroy(zone);
}
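
/*
 * Illustrative sketch (editor's addition): the create/use/destroy life cycle
 * of a private zone. Destroying the zone releases everything allocated from
 * it in one shot, which is the usual reason to create one (the zone name is
 * hypothetical):
 *
 *	malloc_zone_t *z = malloc_create_zone(0, 0);
 *	malloc_set_zone_name(z, "MyScratchZone");
 *	char *s = malloc_zone_malloc(z, 128);
 *	// ... use s ...
 *	malloc_destroy_zone(z);	// frees s and every other block in z
 */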

/*********	Block creation and manipulation	************/

static void
internal_check(void) {
	static vm_address_t	*frames = NULL;
	static unsigned	num_frames;
	if (malloc_zone_check(NULL)) {
		if (!frames) vm_allocate(mach_task_self(), (vm_address_t *)&frames, vm_page_size, 1);
		if (frames) thread_stack_pcs(frames, vm_page_size/sizeof(vm_address_t) - 1, &num_frames);
	} else {
		_SIMPLE_STRING b = _simple_salloc();
		if (b)
			_simple_sprintf(b, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
		else
			_malloc_printf(MALLOC_PRINTF_NOLOG, "*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
		malloc_printf("*** MallocCheckHeap: FAILED check at %dth operation\n", malloc_check_counter-1);
		if (frames) {
			unsigned	index = 1;
			if (b) {
				_simple_sappend(b, "Stack for last operation where the malloc check succeeded: ");
				while (index < num_frames) _simple_sprintf(b, "%p ", frames[index++]);
				malloc_printf("%s\n(Use 'atos' for a symbolic stack)\n", _simple_string(b));
			} else {
				/*
				 * Should only get here if vm_allocate() can't get a single page of
				 * memory, implying _simple_asl_log() would also fail.  So we just
				 * print to the file descriptor.
				 */
				_malloc_printf(MALLOC_PRINTF_NOLOG, "Stack for last operation where the malloc check succeeded: ");
				while (index < num_frames) _malloc_printf(MALLOC_PRINTF_NOLOG, "%p ", frames[index++]);
				_malloc_printf(MALLOC_PRINTF_NOLOG, "\n(Use 'atos' for a symbolic stack)\n");
			}
		}
		if (malloc_check_each > 1) {
			unsigned	recomm_each = (malloc_check_each > 10) ? malloc_check_each/10 : 1;
			unsigned	recomm_start = (malloc_check_counter > malloc_check_each+1) ? malloc_check_counter-1-malloc_check_each : 1;
			malloc_printf("*** Recommend using 'setenv MallocCheckHeapStart %d; setenv MallocCheckHeapEach %d' to narrow down failure\n", recomm_start, recomm_each);
		}
		if (malloc_check_abort) {
			CRSetCrashLogMessage(b ? _simple_string(b) : "*** MallocCheckHeap: FAILED check");
			abort();
		} else if (b)
			_simple_sfree(b);
		if (malloc_check_sleep > 0) {
			_malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping for %d seconds to leave time to attach\n",
						   malloc_check_sleep);
			sleep(malloc_check_sleep);
		} else if (malloc_check_sleep < 0) {
			_malloc_printf(ASL_LEVEL_NOTICE, "*** Sleeping once for %d seconds to leave time to attach\n",
						   -malloc_check_sleep);
			sleep(-malloc_check_sleep);
			malloc_check_sleep = 0;
		}
	}
	malloc_check_start += malloc_check_each;
}

void *
malloc_zone_malloc(malloc_zone_t *zone, size_t size) {
	void	*ptr;
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
		return NULL;
	}
	ptr = zone->malloc(zone, size);
	if (malloc_logger)
		malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
	return ptr;
}

void *
malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) {
	void	*ptr;
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
		return NULL;
	}
	ptr = zone->calloc(zone, num_items, size);
	if (malloc_logger)
		malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, (uintptr_t)zone, (uintptr_t)(num_items * size), 0,
					  (uintptr_t)ptr, 0);
	return ptr;
}

void *
malloc_zone_valloc(malloc_zone_t *zone, size_t size) {
	void	*ptr;
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
		return NULL;
	}
	ptr = zone->valloc(zone, size);
	if (malloc_logger)
		malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
	return ptr;
}

void *
malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
	void	*new_ptr;
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
		return NULL;
	}
	new_ptr = zone->realloc(zone, ptr, size);
	if (malloc_logger)
		malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, (uintptr_t)size,
					  (uintptr_t)new_ptr, 0);
	return new_ptr;
}

void
malloc_zone_free(malloc_zone_t *zone, void *ptr) {
	if (malloc_logger)
		malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	zone->free(zone, ptr);
}

static void
malloc_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
	if (malloc_logger)
		malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	zone->free_definite_size(zone, ptr, size);
}

malloc_zone_t *
malloc_zone_from_ptr(const void *ptr) {
	if (!ptr)
		return NULL;
	else
		return find_registered_zone(ptr, NULL);
}

void *
malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
	void	*ptr;
	if (zone->version < 5) // Version must be >= 5 to look at the new memalign field.
		return NULL;
	if (!(zone->memalign))
		return NULL;
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
		return NULL;
	}
	if (alignment < sizeof(void *) ||		// excludes 0 == alignment
		0 != (alignment & (alignment - 1))) {	// relies on sizeof(void *) being a power of two.
		return NULL;
	}
	ptr = zone->memalign(zone, alignment, size);
	if (malloc_logger)
		malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
	return ptr;
}

/*********	Functions for zone implementors	************/

void
malloc_zone_register(malloc_zone_t *zone) {
	MALLOC_LOCK();
	malloc_zone_register_while_locked(zone);
	MALLOC_UNLOCK();
}
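
/*
 * Illustrative sketch (editor's addition): a zone implementor hands a fully
 * populated malloc_zone_t to malloc_zone_register(); from then on free() and
 * malloc_size() can route pointers to it via find_registered_zone(). The
 * make_my_zone() constructor is hypothetical:
 *
 *	malloc_zone_t *mz = make_my_zone();	// fills size/malloc/free/... slots
 *	malloc_zone_register(mz);
 *	// ... later, tear down in the reverse order:
 *	malloc_zone_unregister(mz);
 */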

void
malloc_zone_unregister(malloc_zone_t *z) {
	unsigned	index;

	if (malloc_num_zones == 0)
		return;

	MALLOC_LOCK();
	for (index = 0; index < malloc_num_zones; ++index) {
		if (z != malloc_zones[index])
			continue;

		// Modify the page to allow write access, so that we can update the
		// malloc_zones array.
		size_t protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
		mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);

		// If we found a match, replace it with the entry at the end of the list, shrink the list,
		// and leave the end of the list intact to avoid racing with find_registered_zone().

		malloc_zones[index] = malloc_zones[malloc_num_zones - 1];
		--malloc_num_zones;

		mprotect(malloc_zones, protect_size, PROT_READ);

		// Exchange the roles of the FRZ counters. The counter that has captured the number of threads presently
		// executing *inside* find_registered_zone is swapped with the counter drained to zero last time through.
		// The former is then allowed to drain to zero while this thread yields.
		int *p = pFRZCounterLive;
		pFRZCounterLive = pFRZCounterDrain;
		pFRZCounterDrain = p;
		__sync_synchronize(); // Full memory barrier

		while (0 != *pFRZCounterDrain) { yield(); }

		MALLOC_UNLOCK();

		return;
	}
	MALLOC_UNLOCK();
	malloc_printf("*** malloc_zone_unregister() failed for %p\n", z);
}

void
malloc_set_zone_name(malloc_zone_t *z, const char *name) {
	char	*newName;

	mprotect(z, sizeof(malloc_zone_t), PROT_READ | PROT_WRITE);
	if (z->zone_name) {
		free((char *)z->zone_name);
		z->zone_name = NULL;
	}
	if (name) {
		size_t buflen = strlen(name) + 1;
		newName = malloc_zone_malloc(z, buflen);
		if (newName) {
			strlcpy(newName, name, buflen);
			z->zone_name = (const char *)newName;
		} else {
			z->zone_name = NULL;
		}
	}
	mprotect(z, sizeof(malloc_zone_t), PROT_READ);
}

const char *
malloc_get_zone_name(malloc_zone_t *zone) {
	return zone->zone_name;
}

/*
 * XXX malloc_printf now uses _simple_*printf.  It only deals with a
 * subset of printf format specifiers, but it doesn't call malloc.
 */

__attribute__((visibility("hidden")))
void
_malloc_vprintf(int flags, const char *format, va_list ap)
{
	_SIMPLE_STRING b;

	if (_malloc_no_asl_log || (flags & MALLOC_PRINTF_NOLOG) || (b = _simple_salloc()) == NULL) {
		if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
			void *self = _os_tsd_get_direct(__TSD_THREAD_SELF);
			_simple_dprintf(malloc_debug_file, "%s(%d,%p) malloc: ", getprogname(), getpid(), self);
		}
		_simple_vdprintf(malloc_debug_file, format, ap);
		return;
	}
	if (!(flags & MALLOC_PRINTF_NOPREFIX)) {
		void *self = _os_tsd_get_direct(__TSD_THREAD_SELF);
		_simple_sprintf(b, "%s(%d,%p) malloc: ", getprogname(), getpid(), self);
	}
	_simple_vsprintf(b, format, ap);
	_simple_put(b, malloc_debug_file);
	_simple_asl_log(flags & MALLOC_PRINTF_LEVEL_MASK, Malloc_Facility, _simple_string(b));
	_simple_sfree(b);
}

__attribute__((visibility("hidden")))
void
_malloc_printf(int flags, const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	_malloc_vprintf(flags, format, ap);
	va_end(ap);
}

void
malloc_printf(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	_malloc_vprintf(ASL_LEVEL_ERR, format, ap);
	va_end(ap);
}

/*********	Generic ANSI callouts	************/

void *
malloc(size_t size) {
	void	*retval;
	retval = malloc_zone_malloc(inline_malloc_default_zone(), size);
	if (retval == NULL) {
		errno = ENOMEM;
	}
	return retval;
}

void *
calloc(size_t num_items, size_t size) {
	void	*retval;
	retval = malloc_zone_calloc(inline_malloc_default_zone(), num_items, size);
	if (retval == NULL) {
		errno = ENOMEM;
	}
	return retval;
}

void
free(void *ptr) {
	malloc_zone_t	*zone;
	size_t		size;
	if (!ptr)
		return;
	zone = find_registered_zone(ptr, &size);
	if (!zone) {
		malloc_printf("*** error for object %p: pointer being freed was not allocated\n"
					  "*** set a breakpoint in malloc_error_break to debug\n", ptr);
		malloc_error_break();
		if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR))) {
			_SIMPLE_STRING b = _simple_salloc();
			if (b) {
				_simple_sprintf(b, "*** error for object %p: pointer being freed was not allocated\n", ptr);
				CRSetCrashLogMessage(_simple_string(b));
			} else {
				CRSetCrashLogMessage("*** error: pointer being freed was not allocated\n");
			}
			abort();
		}
	} else if (zone->version >= 6 && zone->free_definite_size)
		malloc_zone_free_definite_size(zone, ptr, size);
	else
		malloc_zone_free(zone, ptr);
}

void *
realloc(void *in_ptr, size_t new_size) {
	void		*retval = NULL;
	void		*old_ptr;
	malloc_zone_t	*zone;

	// SUSv3: "If size is 0 and ptr is not a null pointer, the object
	// pointed to is freed. If the space cannot be allocated, the object
	// shall remain unchanged."  Also "If size is 0, either a null pointer
	// or a unique pointer that can be successfully passed to free() shall
	// be returned."  We choose to allocate a minimum size object by calling
	// malloc_zone_malloc with zero size, which matches "If ptr is a null
	// pointer, realloc() shall be equivalent to malloc() for the specified
	// size."  So we only free the original memory if the allocation succeeds.
	old_ptr = (new_size == 0) ? NULL : in_ptr;
	if (!old_ptr) {
		retval = malloc_zone_malloc(inline_malloc_default_zone(), new_size);
	} else {
		zone = find_registered_zone(old_ptr, NULL);
		if (!zone) {
			malloc_printf("*** error for object %p: pointer being realloc'd was not allocated\n"
						  "*** set a breakpoint in malloc_error_break to debug\n", old_ptr);
			malloc_error_break();
			if ((malloc_debug_flags & (SCALABLE_MALLOC_ABORT_ON_CORRUPTION|SCALABLE_MALLOC_ABORT_ON_ERROR))) {
				_SIMPLE_STRING b = _simple_salloc();
				if (b) {
					_simple_sprintf(b, "*** error for object %p: pointer being realloc'd was not allocated\n", old_ptr);
					CRSetCrashLogMessage(_simple_string(b));
				} else {
					CRSetCrashLogMessage("*** error: pointer being realloc'd was not allocated\n");
				}
				abort();
			}
		} else {
			retval = malloc_zone_realloc(zone, old_ptr, new_size);
		}
	}
	if (retval == NULL) {
		errno = ENOMEM;
	} else if (new_size == 0) {
		free(in_ptr);
	}
	return retval;
}

void *
valloc(size_t size) {
	void	*retval;
	malloc_zone_t	*zone = inline_malloc_default_zone();
	retval = malloc_zone_valloc(zone, size);
	if (retval == NULL) {
		errno = ENOMEM;
	}
	return retval;
}

extern void
vfree(void *ptr) {
	free(ptr);
}

size_t
malloc_size(const void *ptr) {
	size_t	size = 0;

	if (!ptr)
		return size;

	(void)find_registered_zone(ptr, &size);
	return size;
}

size_t
malloc_good_size(size_t size) {
	malloc_zone_t	*zone = inline_malloc_default_zone();
	return zone->introspect->good_size(zone, size);
}
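
/*
 * Illustrative sketch (editor's addition): using malloc_good_size() to pick
 * a capacity the allocator can satisfy without internal waste. The doubling
 * growth policy shown is hypothetical:
 *
 *	size_t want = len * 2;				// naive doubling
 *	size_t cap = malloc_good_size(want);		// round up to an allocation
 *							// size the zone grants anyway
 *	char *buf = realloc(old, cap);
 */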

/*
 * The posix_memalign() function shall allocate size bytes aligned on a boundary specified by alignment,
 * and shall return a pointer to the allocated memory in memptr.
 * The value of alignment shall be a multiple of sizeof(void *), that is also a power of two.
 * Upon successful completion, the value pointed to by memptr shall be a multiple of alignment.
 *
 * Upon successful completion, posix_memalign() shall return zero; otherwise,
 * an error number shall be returned to indicate the error.
 *
 * The posix_memalign() function shall fail if:
 * EINVAL
 *	The value of the alignment parameter is not a power of two multiple of sizeof(void *).
 * ENOMEM
 *	There is insufficient memory available with the requested alignment.
 */

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void	*retval;

	/* POSIX is silent on NULL == memptr !?! */

	retval = malloc_zone_memalign(inline_malloc_default_zone(), alignment, size);
	if (retval == NULL) {
		// To avoid testing the alignment constraints redundantly, we'll rely on the
		// test made in malloc_zone_memalign to vet each request. Only if that test fails
		// and returns NULL, do we arrive here to detect the bogus alignment and give the
		// required EINVAL return.
		if (alignment < sizeof(void *) ||		// excludes 0 == alignment
			0 != (alignment & (alignment - 1))) {	// relies on sizeof(void *) being a power of two.
			return EINVAL;
		}
		return ENOMEM;
	} else {
		*memptr = retval; // Set iff allocation succeeded
		return 0;
	}
}
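
/*
 * Illustrative sketch (editor's addition): a caller requesting cache-line
 * alignment (64 is an assumption about the target CPU). Note the result is
 * returned through the out-parameter, not errno:
 *
 *	void *p = NULL;
 *	int err = posix_memalign(&p, 64, 1024);
 *	if (err == 0) {
 *		// p is 64-byte aligned; release with free(p)
 *	} else if (err == EINVAL) {
 *		// alignment was not a power-of-two multiple of sizeof(void *)
 *	}
 */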

static malloc_zone_t *
find_registered_purgeable_zone(void *ptr) {
	if (!ptr)
		return NULL;

	/*
	 * Look for a zone which contains ptr.  If that zone does not have the purgeable malloc flag
	 * set, or the allocation is too small, do nothing.  Otherwise, set the allocation volatile.
	 * FIXME: for performance reasons, we should probably keep a separate list of purgeable zones
	 * and only search those.
	 */
	size_t size = 0;
	malloc_zone_t *zone = find_registered_zone(ptr, &size);

	/* FIXME: would really like a zone->introspect->flags->purgeable check, but haven't determined
	 * binary compatibility impact of changing the introspect struct yet. */
	if (!zone)
		return NULL;

	/* Check to make sure the pointer is page aligned and the size is a multiple of the page size */
	if ((size < vm_page_size) || ((size % vm_page_size) != 0))
		return NULL;

	return zone;
}

void
malloc_make_purgeable(void *ptr) {
	malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
	if (!zone)
		return;

	int state = VM_PURGABLE_VOLATILE;
	vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
	return;
}

/* Returns 0 if ptr is valid and still resident, EFAULT if it was purged.  Ignore the return
 * value from vm_purgable_control() and only report the resulting state. */
int
malloc_make_nonpurgeable(void *ptr) {
	malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
	if (!zone)
		return 0;

	int state = VM_PURGABLE_NONVOLATILE;
	vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);

	if (state == VM_PURGABLE_EMPTY)
		return EFAULT;

	return 0;
}
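
/*
 * Illustrative sketch (editor's addition): the volatile/non-volatile cycle
 * from the caller's perspective. Assumes buf came from the default purgeable
 * zone and is at least one page:
 *
 *	malloc_make_purgeable(buf);			// kernel may now reclaim it
 *	// ... time passes, perhaps under memory pressure ...
 *	if (malloc_make_nonpurgeable(buf) == EFAULT) {
 *		// contents were purged; regenerate them before use
 *	}
 */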

size_t malloc_zone_pressure_relief(malloc_zone_t *zone, size_t goal)
{
	if (!zone) {
		unsigned index = 0;
		size_t total = 0;

		// Take lock to defend against malloc_destroy_zone()
		MALLOC_LOCK();
		while (index < malloc_num_zones) {
			zone = malloc_zones[index++];
			if (zone->version < 8)
				continue;
			if (NULL == zone->pressure_relief)
				continue;
			if (0 == goal) /* Greedy */
				total += zone->pressure_relief(zone, 0);
			else if (goal > total)
				total += zone->pressure_relief(zone, goal - total);
			else /* total >= goal */
				break;
		}
		MALLOC_UNLOCK();
		return total;
	} else {
		// Assumes zone is not destroyed for the duration of this call
		if (zone->version < 8)
			return 0;
		if (NULL == zone->pressure_relief)
			return 0;
		return zone->pressure_relief(zone, goal);
	}
}
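
/*
 * Illustrative sketch (editor's addition): a memory-pressure handler asking
 * every registered zone to give back up to 16 MB, falling back to a greedy
 * pass (goal 0) if nothing came back:
 *
 *	size_t released = malloc_zone_pressure_relief(NULL, 16 * 1024 * 1024);
 *	if (released == 0)
 *		released = malloc_zone_pressure_relief(NULL, 0);
 */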

/*********	Batch methods	************/

unsigned
malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested) {
	unsigned	(*batch_malloc)(malloc_zone_t *, size_t, void **, unsigned) = zone->batch_malloc;
	if (!batch_malloc)
		return 0;
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	unsigned	batched = batch_malloc(zone, size, results, num_requested);
	if (malloc_logger) {
		unsigned	index = 0;
		while (index < batched) {
			malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)results[index], 0);
			index++;
		}
	}
	return batched;
}

void
malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num) {
	if (malloc_check_start && (malloc_check_counter++ >= malloc_check_start)) {
		internal_check();
	}
	if (malloc_logger) {
		unsigned	index = 0;
		while (index < num) {
			malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)to_be_freed[index], 0, 0, 0);
			index++;
		}
	}
	void	(*batch_free)(malloc_zone_t *, void **, unsigned) = zone->batch_free;
	if (batch_free) {
		batch_free(zone, to_be_freed, num);
	} else {
		void	(*free_fun)(malloc_zone_t *, void *) = zone->free;
		while (num--) {
			void	*ptr = *to_be_freed++;
			free_fun(zone, ptr);
		}
	}
}
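
/*
 * Illustrative sketch (editor's addition): batching many same-sized
 * allocations through one zone call. The zone may return fewer blocks than
 * requested, so the returned count must be honored. struct node stands in
 * for whatever fixed-size record the caller needs:
 *
 *	void *nodes[64];
 *	unsigned got = malloc_zone_batch_malloc(malloc_default_zone(),
 *						sizeof(struct node), nodes, 64);
 *	// ... use nodes[0 .. got-1] ...
 *	malloc_zone_batch_free(malloc_default_zone(), nodes, got);
 */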

/*********	Functions for performance tools	************/

static kern_return_t
_malloc_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr) {
	*ptr = (void *)address;
	return 0;
}

kern_return_t
malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t **addresses, unsigned *count) {
	// Note that the 2 following addresses are not correct if the address space of the target is
	// different from your own.  This notably occurs if the address of System.framework is slid
	// (e.g. different than at B & I).
	vm_address_t	remote_malloc_zones = (vm_address_t)&malloc_zones;
	vm_address_t	remote_malloc_num_zones = (vm_address_t)&malloc_num_zones;
	kern_return_t	err;
	vm_address_t	zones_address;
	vm_address_t	*zones_address_ref;
	unsigned		num_zones;
	unsigned		*num_zones_ref;
	if (!reader) reader = _malloc_default_reader;
	// printf("Read malloc_zones at address %p should be %p\n", &malloc_zones, malloc_zones);
	err = reader(task, remote_malloc_zones, sizeof(void *), (void **)&zones_address_ref);
	// printf("Read malloc_zones[%p]=%p\n", remote_malloc_zones, *zones_address_ref);
	if (err) {
		malloc_printf("*** malloc_get_all_zones: error reading zones_address at %p\n", (void *)remote_malloc_zones);
		return err;
	}
	zones_address = *zones_address_ref;
	// printf("Reading num_zones at address %p\n", remote_malloc_num_zones);
	err = reader(task, remote_malloc_num_zones, sizeof(unsigned), (void **)&num_zones_ref);
	if (err) {
		malloc_printf("*** malloc_get_all_zones: error reading num_zones at %p\n", (void *)remote_malloc_num_zones);
		return err;
	}
	num_zones = *num_zones_ref;
	// printf("Read malloc_num_zones[%p]=%d\n", remote_malloc_num_zones, num_zones);
	*count = num_zones;
	// printf("malloc_get_all_zones successfully found %d zones\n", num_zones);
	err = reader(task, zones_address, sizeof(malloc_zone_t *) * num_zones, (void **)addresses);
	if (err) {
		malloc_printf("*** malloc_get_all_zones: error reading zones at %p\n", (void *)zones_address);
		return err;
	}
	// printf("malloc_get_all_zones successfully read %d zones\n", num_zones);
	return err;
}
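
/*
 * Illustrative sketch (editor's addition): how an out-of-process tool uses
 * this entry point. my_remote_reader is a hypothetical memory_reader_t that
 * copies bytes out of the target task (e.g., built on mach_vm_read):
 *
 *	vm_address_t *zone_addrs;
 *	unsigned nzones;
 *	if (malloc_get_all_zones(target_task, my_remote_reader, &zone_addrs, &nzones) == KERN_SUCCESS) {
 *		// zone_addrs[0 .. nzones-1] hold addresses of malloc_zone_t in the target
 *	}
 */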

/*********	Debug helpers	************/

void
malloc_zone_print_ptr_info(void *ptr) {
	malloc_zone_t	*zone;
	if (!ptr) return;
	zone = malloc_zone_from_ptr(ptr);
	if (zone) {
		printf("ptr %p in registered zone %p\n", ptr, zone);
	} else {
		printf("ptr %p not in heap\n", ptr);
	}
}

boolean_t
malloc_zone_check(malloc_zone_t *zone) {
	boolean_t	ok = 1;
	if (!zone) {
		unsigned	index = 0;
		while (index < malloc_num_zones) {
			zone = malloc_zones[index++];
			if (!zone->introspect->check(zone)) ok = 0;
		}
	} else {
		ok = zone->introspect->check(zone);
	}
	return ok;
}

void
malloc_zone_print(malloc_zone_t *zone, boolean_t verbose) {
	if (!zone) {
		unsigned	index = 0;
		while (index < malloc_num_zones) {
			zone = malloc_zones[index++];
			zone->introspect->print(zone, verbose);
		}
	} else {
		zone->introspect->print(zone, verbose);
	}
}

void
malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
	if (!zone) {
		memset(stats, 0, sizeof(*stats));
		unsigned	index = 0;
		while (index < malloc_num_zones) {
			zone = malloc_zones[index++];
			malloc_statistics_t	this_stats;
			zone->introspect->statistics(zone, &this_stats);
			stats->blocks_in_use += this_stats.blocks_in_use;
			stats->size_in_use += this_stats.size_in_use;
			stats->max_size_in_use += this_stats.max_size_in_use;
			stats->size_allocated += this_stats.size_allocated;
		}
	} else {
		zone->introspect->statistics(zone, stats);
	}
}
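
/*
 * Illustrative sketch (editor's addition): sampling process-wide heap usage.
 * Passing NULL aggregates the introspection statistics across every
 * registered zone:
 *
 *	malloc_statistics_t s;
 *	malloc_zone_statistics(NULL, &s);
 *	malloc_printf("%u blocks, %lu bytes in use\n",
 *				  s.blocks_in_use, (unsigned long)s.size_in_use);
 */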

void
malloc_zone_log(malloc_zone_t *zone, void *address) {
	if (!zone) {
		unsigned	index = 0;
		while (index < malloc_num_zones) {
			zone = malloc_zones[index++];
			zone->introspect->log(zone, address);
		}
	} else {
		zone->introspect->log(zone, address);
	}
}

/*********	Misc other entry points	************/

static void
DefaultMallocError(int x) {
#if USE_SLEEP_RATHER_THAN_ABORT
	malloc_printf("*** error %d\n", x);
	sleep(3600);
#else
	_SIMPLE_STRING b = _simple_salloc();
	if (b) {
		_simple_sprintf(b, "*** error %d", x);
		malloc_printf("%s\n", _simple_string(b));
		CRSetCrashLogMessage(_simple_string(b));
	} else {
		_malloc_printf(MALLOC_PRINTF_NOLOG, "*** error %d", x);
		CRSetCrashLogMessage("*** DefaultMallocError called");
	}
	abort();
#endif
}

void (*malloc_error(void (*func)(int)))(int) {
	return DefaultMallocError;
}

/* Stack logging fork-handling prototypes */
extern void __stack_logging_fork_prepare();
extern void __stack_logging_fork_parent();
extern void __stack_logging_fork_child();
extern void __stack_logging_early_finished();

static void
_malloc_lock_all(void (*callout)(void)) {
	unsigned index = 0;
	MALLOC_LOCK();
	while (index < malloc_num_zones) {
		malloc_zone_t *zone = malloc_zones[index++];
		zone->introspect->force_lock(zone);
	}
	callout();
}

static void
_malloc_unlock_all(void (*callout)(void)) {
	unsigned index = 0;
	callout();
	while (index < malloc_num_zones) {
		malloc_zone_t *zone = malloc_zones[index++];
		zone->introspect->force_unlock(zone);
	}
	MALLOC_UNLOCK();
}

// Called prior to fork() to guarantee that malloc is not in any critical
// sections during the fork(); prevent any locks from being held by non-
// surviving threads after the fork.
void
_malloc_fork_prepare(void) {
	return _malloc_lock_all(&__stack_logging_fork_prepare);
}

// Called in the parent process after fork() to resume normal operation.
void
_malloc_fork_parent(void) {
	return _malloc_unlock_all(&__stack_logging_fork_parent);
}

// Called in the child process after fork() to resume normal operation.
void
_malloc_fork_child(void) {
#if CONFIG_NANOZONE
	if (_malloc_is_initialized && _malloc_engaged_nano)
		nano_forked_zone(inline_malloc_default_zone());
#endif
	return _malloc_unlock_all(&__stack_logging_fork_child);
}
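
/*
 * Illustrative sketch (editor's addition): the prepare/parent/child triple
 * follows the shape pthread_atfork() expects. Libsystem wires these handlers
 * up itself, so the registration below only models that arrangement; it is
 * not something user code should do:
 *
 *	pthread_atfork(_malloc_fork_prepare, _malloc_fork_parent, _malloc_fork_child);
 */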

/*
 * A Glibc-like mstats() interface.
 *
 * Note that this interface really isn't very good, as it doesn't understand
 * that we may have multiple allocators running at once.  We just massage
 * the result from malloc_zone_statistics in any case.
 */
struct mstats
mstats(void)
{
	malloc_statistics_t s;
	struct mstats m;

	malloc_zone_statistics(NULL, &s);
	m.bytes_total = s.size_allocated;
	m.chunks_used = s.blocks_in_use;
	m.bytes_used = s.size_in_use;
	m.chunks_free = 0;
	m.bytes_free = m.bytes_total - m.bytes_used;	/* isn't this somewhat obvious? */

	return(m);
}
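
/*
 * Illustrative sketch (editor's addition): the Glibc-compatible view this
 * shim provides. chunks_free is always reported as 0 here, so only the byte
 * totals are meaningful:
 *
 *	struct mstats m = mstats();
 *	malloc_printf("used %lu of %lu bytes\n",
 *				  (unsigned long)m.bytes_used, (unsigned long)m.bytes_total);
 */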

boolean_t
malloc_zone_enable_discharge_checking(malloc_zone_t *zone)
{
	if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
		return FALSE;
	if (NULL == zone->introspect->enable_discharge_checking)
		return FALSE;
	return zone->introspect->enable_discharge_checking(zone);
}

void
malloc_zone_disable_discharge_checking(malloc_zone_t *zone)
{
	if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
		return;
	if (NULL == zone->introspect->disable_discharge_checking)
		return;
	zone->introspect->disable_discharge_checking(zone);
}

void
malloc_zone_discharge(malloc_zone_t *zone, void *memory)
{
	if (NULL == zone)
		zone = malloc_zone_from_ptr(memory);
	if (NULL == zone)
		return;
	if (zone->version < 7) // Version must be >= 7 to look at the new discharge checking fields.
		return;
	if (NULL == zone->introspect->discharge)
		return;
	zone->introspect->discharge(zone, memory);
}

void
malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info))
{
	if (!zone) {
		unsigned	index = 0;
		while (index < malloc_num_zones) {
			zone = malloc_zones[index++];
			if (zone->version < 7)
				continue;
			if (NULL == zone->introspect->enumerate_discharged_pointers)
				continue;
			zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
		}
	} else {
		if (zone->version < 7)
			return;
		if (NULL == zone->introspect->enumerate_discharged_pointers)
			return;
		zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
	}
}

/*****************	OBSOLETE ENTRY POINTS	********************/

#if PHASE_OUT_OLD_MALLOC
#error PHASE OUT THE FOLLOWING FUNCTIONS
#endif

void
set_malloc_singlethreaded(boolean_t single) {
	static boolean_t warned = 0;
	if (!warned) {
#if PHASE_OUT_OLD_MALLOC
		malloc_printf("*** OBSOLETE: set_malloc_singlethreaded(%d)\n", single);
#endif
		warned = 1;
	}
}

void
malloc_singlethreaded(void) {
	static boolean_t warned = 0;
	if (!warned) {
		malloc_printf("*** OBSOLETE: malloc_singlethreaded()\n");
		warned = 1;
	}
}

int
malloc_debug(int level) {
	malloc_printf("*** OBSOLETE: malloc_debug()\n");
	return 0;
}

/* vim: set noet:ts=4:sw=4:cindent: */