#include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_ZONE
#  error "This source file is for zones on Darwin (OS X)."
#endif

/*
 * Definitions of the following structs in malloc/malloc.h might be too old
 * for the built binary to run on newer versions of OSX.  So use the newest
 * possible version of those structs.
 */
typedef struct _malloc_zone_t {
	void *reserved1;
	void *reserved2;
	size_t (*size)(struct _malloc_zone_t *, const void *);
	void *(*malloc)(struct _malloc_zone_t *, size_t);
	void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
	void *(*valloc)(struct _malloc_zone_t *, size_t);
	void (*free)(struct _malloc_zone_t *, void *);
	void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
	void (*destroy)(struct _malloc_zone_t *);
	const char *zone_name;
	unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **,
	    unsigned);
	void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
	struct malloc_introspection_t *introspect;
	unsigned version;
	void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
	void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
	size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;

typedef struct {
	vm_address_t address;
	vm_size_t size;
} vm_range_t;

typedef struct malloc_statistics_t {
	unsigned blocks_in_use;
	size_t size_in_use;
	size_t max_size_in_use;
	size_t size_allocated;
} malloc_statistics_t;

typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t,
    void **);

typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *,
    unsigned);

typedef struct malloc_introspection_t {
	kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t,
	    memory_reader_t, vm_range_recorder_t);
	size_t (*good_size)(malloc_zone_t *, size_t);
	boolean_t (*check)(malloc_zone_t *);
	void (*print)(malloc_zone_t *, boolean_t);
	void (*log)(malloc_zone_t *, void *);
	void (*force_lock)(malloc_zone_t *);
	void (*force_unlock)(malloc_zone_t *);
	void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
	boolean_t (*zone_locked)(malloc_zone_t *);
	boolean_t (*enable_discharge_checking)(malloc_zone_t *);
	boolean_t (*disable_discharge_checking)(malloc_zone_t *);
	void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
	void (*enumerate_discharged_pointers)(malloc_zone_t *,
	    void (^)(void *, void *));
#else
	void *enumerate_unavailable_without_blocks;
#endif
	void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;

extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t,
    vm_address_t **, unsigned *);

extern malloc_zone_t *malloc_default_zone(void);

extern void malloc_zone_register(malloc_zone_t *zone);

extern void malloc_zone_unregister(malloc_zone_t *zone);

/*
 * The malloc_default_purgeable_zone() function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);

/******************************************************************************/
/* Data. */

static malloc_zone_t *default_zone, *purgeable_zone;
static malloc_zone_t jemalloc_zone;
static struct malloc_introspection_t jemalloc_zone_introspect;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t	zone_size(malloc_zone_t *zone, const void *ptr);
static void	*zone_malloc(malloc_zone_t *zone, size_t size);
static void	*zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void	*zone_valloc(malloc_zone_t *zone, size_t size);
static void	zone_free(malloc_zone_t *zone, void *ptr);
static void	*zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static void	*zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
static void	zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
static void	zone_destroy(malloc_zone_t *zone);
static unsigned	zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
    void **results, unsigned num_requested);
static void	zone_batch_free(struct _malloc_zone_t *zone,
    void **to_be_freed, unsigned num_to_be_freed);
static size_t	zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t	zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t	zone_enumerator(task_t task, void *data,
    unsigned type_mask, vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder);
static boolean_t	zone_check(malloc_zone_t *zone);
static void	zone_print(malloc_zone_t *zone, boolean_t verbose);
static void	zone_log(malloc_zone_t *zone, void *address);
static void	zone_force_lock(malloc_zone_t *zone);
static void	zone_force_unlock(malloc_zone_t *zone);
static void	zone_statistics(malloc_zone_t *zone,
    malloc_statistics_t *stats);
static boolean_t	zone_locked(malloc_zone_t *zone);
static void	zone_reinit_lock(malloc_zone_t *zone);

/******************************************************************************/
/*
 * Functions.
 */

static size_t
zone_size(malloc_zone_t *zone, const void *ptr)
{
	/*
	 * There appear to be places within Darwin (such as setenv(3)) that
	 * cause calls to this function with pointers that *no* zone owns.  If
	 * we knew that all pointers were owned by *some* zone, we could split
	 * our zone into two parts, and use one as the default allocator and
	 * the other as the default deallocator/reallocator.  Since that will
	 * not work in practice, we must check all pointers to assure that they
	 * reside within a mapped extent before determining size.
	 */
	return (ivsalloc(tsdn_fetch(), ptr));
}

static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{
	return (je_malloc(size));
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{
	return (je_calloc(num, size));
}

static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
	void *ret = NULL; /* Assignment avoids useless compiler warning. */

	je_posix_memalign(&ret, PAGE, size);

	return (ret);
}

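/*
 * Free via jemalloc only if jemalloc owns the pointer: ivsalloc() returns a
 * nonzero size iff ptr lies within a jemalloc-mapped extent.  Anything else
 * (e.g. an allocation made before this zone became the default) falls
 * through to the system free(), which dispatches to the owning zone.
 */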
static void
zone_free(malloc_zone_t *zone, void *ptr)
{
	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
		je_free(ptr);
		return;
	}

	free(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
	if (ivsalloc(tsdn_fetch(), ptr) != 0)
		return (je_realloc(ptr, size));

	return (realloc(ptr, size));
}

static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
	void *ret = NULL; /* Assignment avoids useless compiler warning. */

	je_posix_memalign(&ret, alignment, size);

	return (ret);
}

static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
	size_t alloc_size;

	alloc_size = ivsalloc(tsdn_fetch(), ptr);
	if (alloc_size != 0) {
		assert(alloc_size == size);
		je_free(ptr);
		return;
	}

	free(ptr);
}

static void
zone_destroy(malloc_zone_t *zone)
{
	/* This function should never be called. */
	not_reached();
}

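/*
 * Allocate up to num_requested objects of the given size, stopping at the
 * first failure; the return value is the number of objects actually
 * allocated.
 */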
static unsigned
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
    unsigned num_requested)
{
	unsigned i;

	for (i = 0; i < num_requested; i++) {
		results[i] = je_malloc(size);
		if (!results[i])
			break;
	}

	return i;
}

static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
    unsigned num_to_be_freed)
{
	unsigned i;

	for (i = 0; i < num_to_be_freed; i++) {
		zone_free(zone, to_be_freed[i]);
		to_be_freed[i] = NULL;
	}
}

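/* No memory is relinquished in response to memory pressure. */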
static size_t
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal)
{
	return 0;
}

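/*
 * Report the size jemalloc would actually provide for a request, i.e. the
 * requested size rounded up to the nearest size class.
 */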
static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
	if (size == 0)
		size = 1;
	return (s2u(size));
}

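/*
 * The next four introspection hooks (enumerator, check, print, log) are
 * stubs; jemalloc does not support heap enumeration, consistency checking,
 * or logging through the zone interface.
 */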
static kern_return_t
zone_enumerator(task_t task, void *data, unsigned type_mask,
    vm_address_t zone_address, memory_reader_t reader,
    vm_range_recorder_t recorder)
{
	return KERN_SUCCESS;
}

static boolean_t
zone_check(malloc_zone_t *zone)
{
	return true;
}

static void
zone_print(malloc_zone_t *zone, boolean_t verbose)
{
}

static void
zone_log(malloc_zone_t *zone, void *address)
{
}

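/*
 * OSX's libc calls force_lock on every zone before fork(2) so that the heap
 * is in a consistent state in the child; acquire all of jemalloc's locks.
 */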
static void
zone_force_lock(malloc_zone_t *zone)
{
	if (isthreaded)
		jemalloc_prefork();
}

static void
zone_force_unlock(malloc_zone_t *zone)
{
	/*
	 * Call jemalloc_postfork_child() rather than
	 * jemalloc_postfork_parent(), because this function is executed by
	 * both parent and child.  The parent can tolerate having state
	 * reinitialized, but the child cannot unlock mutexes that were locked
	 * by the parent.
	 */
	if (isthreaded)
		jemalloc_postfork_child();
}

static void
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
{
	/* We make no effort to actually fill the values. */
	stats->blocks_in_use = 0;
	stats->size_in_use = 0;
	stats->max_size_in_use = 0;
	stats->size_allocated = 0;
}

static boolean_t
zone_locked(malloc_zone_t *zone)
{
	/* Pretend no lock is being held. */
	return false;
}

static void
zone_reinit_lock(malloc_zone_t *zone)
{
	/*
	 * As of OSX 10.12, this function is only used when force_unlock would
	 * be used if the zone version were < 9.  So just use force_unlock.
	 */
	zone_force_unlock(zone);
}

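/*
 * Fill in the jemalloc zone's function table.  The version field tells
 * OSX's libc which of the trailing members (memalign, free_definite_size,
 * pressure_relief, introspect->reinit_lock) it may safely call.
 */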
static void
zone_init(void)
{
	jemalloc_zone.size = zone_size;
	jemalloc_zone.malloc = zone_malloc;
	jemalloc_zone.calloc = zone_calloc;
	jemalloc_zone.valloc = zone_valloc;
	jemalloc_zone.free = zone_free;
	jemalloc_zone.realloc = zone_realloc;
	jemalloc_zone.destroy = zone_destroy;
	jemalloc_zone.zone_name = "jemalloc_zone";
	jemalloc_zone.batch_malloc = zone_batch_malloc;
	jemalloc_zone.batch_free = zone_batch_free;
	jemalloc_zone.introspect = &jemalloc_zone_introspect;
	jemalloc_zone.version = 9;
	jemalloc_zone.memalign = zone_memalign;
	jemalloc_zone.free_definite_size = zone_free_definite_size;
	jemalloc_zone.pressure_relief = zone_pressure_relief;

	jemalloc_zone_introspect.enumerator = zone_enumerator;
	jemalloc_zone_introspect.good_size = zone_good_size;
	jemalloc_zone_introspect.check = zone_check;
	jemalloc_zone_introspect.print = zone_print;
	jemalloc_zone_introspect.log = zone_log;
	jemalloc_zone_introspect.force_lock = zone_force_lock;
	jemalloc_zone_introspect.force_unlock = zone_force_unlock;
	jemalloc_zone_introspect.statistics = zone_statistics;
	jemalloc_zone_introspect.zone_locked = zone_locked;
	jemalloc_zone_introspect.enable_discharge_checking = NULL;
	jemalloc_zone_introspect.disable_discharge_checking = NULL;
	jemalloc_zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
	jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
#else
	jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
	jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
}

static malloc_zone_t *
zone_default_get(void)
{
	malloc_zone_t **zones = NULL;
	unsigned int num_zones = 0;

	/*
	 * On OSX 10.12, malloc_default_zone returns a special zone that is
	 * not present in the list of registered zones.  That zone uses a
	 * "lite zone" if one is present (apparently enabled when malloc stack
	 * logging is enabled), or the first registered zone otherwise.  In
	 * practice this means unless malloc stack logging is enabled, the
	 * first registered zone is the default.  So get the list of zones to
	 * get the first one, instead of relying on malloc_default_zone.
	 */
	if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
	    (vm_address_t**)&zones, &num_zones)) {
		/*
		 * Reset the value in case the failure happened after it was
		 * set.
		 */
		num_zones = 0;
	}

	if (num_zones)
		return (zones[0]);

	return (malloc_default_zone());
}

/* As written, this function can only promote jemalloc_zone. */
static void
zone_promote(void)
{
	malloc_zone_t *zone;

	do {
		/*
		 * Unregister and reregister the default zone.  On OSX >= 10.6,
		 * unregistering takes the last registered zone and places it
		 * at the location of the specified zone.  Unregistering the
		 * default zone thus makes the last registered one the default.
		 * On OSX < 10.6, unregistering shifts all registered zones.
		 * The first registered zone then becomes the default.
		 */
		malloc_zone_unregister(default_zone);
		malloc_zone_register(default_zone);

		/*
		 * On OSX 10.6, having the default purgeable zone appear before
		 * the default zone makes some things crash because it thinks
		 * it owns the default zone allocated pointers.  We thus
		 * unregister/re-register it in order to ensure it's always
		 * after the default zone.  On OSX < 10.6, there is no
		 * purgeable zone, so this does nothing.  On OSX >= 10.6,
		 * unregistering replaces the purgeable zone with the last
		 * registered zone above, i.e. the default zone.  Registering
		 * it again then puts it at the end, obviously after the
		 * default zone.
		 */
		if (purgeable_zone != NULL) {
			malloc_zone_unregister(purgeable_zone);
			malloc_zone_register(purgeable_zone);
		}

		zone = zone_default_get();
	} while (zone != &jemalloc_zone);
}

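/*
 * Runs automatically at image load time (constructor attribute) to register
 * jemalloc's zone and promote it to be the default.
 */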
JEMALLOC_ATTR(constructor)
void
zone_register(void)
{
	/*
	 * If something else replaced the system default zone allocator, don't
	 * register jemalloc's.
	 */
	default_zone = zone_default_get();
	if (!default_zone->zone_name || strcmp(default_zone->zone_name,
	    "DefaultMallocZone") != 0)
		return;

	/*
	 * The default purgeable zone is created lazily by OSX's libc.  It uses
	 * the default zone when it is created for "small" allocations
	 * (< 15 KiB), but assumes the default zone is a scalable_zone.  This
	 * obviously fails when the default zone is the jemalloc zone, so
	 * malloc_default_purgeable_zone() is called beforehand so that the
	 * default purgeable zone is created when the default zone is still
	 * a scalable_zone.  As purgeable zones only exist on >= 10.6, we need
	 * to check for the existence of malloc_default_purgeable_zone() at
	 * run time.
	 */
	purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
	    malloc_default_purgeable_zone();

	/* Register the custom zone.  At this point it won't be the default. */
	zone_init();
	malloc_zone_register(&jemalloc_zone);

	/* Promote the custom zone to be default. */
	zone_promote();
}
