malloc.c revision 1.233
1/*	$OpenBSD: malloc.c,v 1.233 2017/10/05 04:41:43 otto Exp $	*/
2/*
3 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
4 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
5 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
6 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/*
22 * If we meet some day, and you think this stuff is worth it, you
23 * can buy me a beer in return. Poul-Henning Kamp
24 */
25
26/* #define MALLOC_STATS */
27
28#include <sys/types.h>
29#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
30#include <sys/queue.h>
31#include <sys/mman.h>
32#include <sys/uio.h>
33#include <errno.h>
34#include <stdarg.h>
35#include <stdint.h>
36#include <stdlib.h>
37#include <string.h>
38#include <stdio.h>
39#include <unistd.h>
40
41#ifdef MALLOC_STATS
42#include <sys/tree.h>
43#include <fcntl.h>
44#endif
45
46#include "thread_private.h"
47#include <tib.h>
48
49#if defined(__mips64__)
50#define MALLOC_PAGESHIFT	(14U)
51#else
52#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
53#endif
54
55#define MALLOC_MINSHIFT		4
56#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
57#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
58#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
59#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
60#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
61
62#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
63#define MALLOC_MAXCACHE		256
64#define MALLOC_DELAYED_CHUNK_MASK	15
65#define MALLOC_INITIAL_REGIONS	512
66#define MALLOC_DEFAULT_CACHE	64
67#define MALLOC_CHUNK_LISTS	4
68#define CHUNK_CHECK_LENGTH	32
69
70/*
71 * We move allocations between half a page and a whole page towards the end,
72 * subject to alignment constraints. This is the extra headroom we allow.
73 * Set to zero to be the most strict.
74 */
75#define MALLOC_LEEWAY		0
76#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < 		\
77				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
78#define MALLOC_MOVE(p, sz)  	(((char *)(p)) +			\
79				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
80			    	    ((sz) - mopts.malloc_guard)) & 	\
81				    ~(MALLOC_MINSIZE - 1)))
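/*
 * Illustrative sketch (not in the original source; assumes a 4 KB page,
 * MALLOC_LEEWAY of 0 and no guard page): a 3000-byte allocation is too
 * big for a chunk, so it gets its own page, and MALLOC_MOVE(p, 3000)
 * returns p + ((4096 - 3000) & ~15) = p + 1088.  The allocation then ends
 * at offset 4088, leaving only 8 bytes of slack before the page boundary,
 * while the 16-byte alignment of the returned pointer is preserved.
 */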
82
83#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
84
85/*
86 * What to use for Junk.  This is the byte value we use to fill with
87 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
88 * and SOME_FREEJUNK right before free.
89 */
90#define SOME_JUNK		0xdb	/* deadbeef */
91#define SOME_FREEJUNK		0xdf	/* dead, free */
92
93#define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
94    MAP_ANON | MAP_PRIVATE, -1, 0)
95
96#define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
97    MAP_ANON | MAP_PRIVATE, -1, 0)
98
99#define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
100    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)
101
102struct region_info {
103	void *p;		/* page; low bits used to mark chunks */
104	uintptr_t size;		/* size for pages, or chunk_info pointer */
105#ifdef MALLOC_STATS
106	void *f;		/* where allocated from */
107#endif
108};
109
110LIST_HEAD(chunk_head, chunk_info);
111
112struct dir_info {
113	u_int32_t canary1;
114	int active;			/* status of malloc */
115	struct region_info *r;		/* region slots */
116	size_t regions_total;		/* number of region slots */
117	size_t regions_free;		/* number of free slots */
118					/* lists of free chunk info structs */
119	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
120					/* lists of chunks with free slots */
121	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
122	size_t free_regions_size;	/* free pages cached */
123					/* free pages cache */
124	struct region_info free_regions[MALLOC_MAXCACHE];
125					/* delayed free chunk slots */
126	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
127	size_t rbytesused;		/* random bytes used */
128	char *func;			/* current function */
129	int mutex;
130	u_char rbytes[32];		/* random bytes */
131	u_short chunk_start;
132#ifdef MALLOC_STATS
133	size_t inserts;
134	size_t insert_collisions;
135	size_t finds;
136	size_t find_collisions;
137	size_t deletes;
138	size_t delete_moves;
139	size_t cheap_realloc_tries;
140	size_t cheap_reallocs;
141	size_t malloc_used;		/* bytes allocated */
142	size_t malloc_guarded;		/* bytes used for guards */
143#define STATS_ADD(x,y)	((x) += (y))
144#define STATS_SUB(x,y)	((x) -= (y))
145#define STATS_INC(x)	((x)++)
146#define STATS_ZERO(x)	((x) = 0)
147#define STATS_SETF(x,y)	((x)->f = (y))
148#else
149#define STATS_ADD(x,y)	/* nothing */
150#define STATS_SUB(x,y)	/* nothing */
151#define STATS_INC(x)	/* nothing */
152#define STATS_ZERO(x)	/* nothing */
153#define STATS_SETF(x,y)	/* nothing */
154#endif /* MALLOC_STATS */
155	u_int32_t canary2;
156};
157#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
158			~MALLOC_PAGEMASK)
159
160/*
161 * This structure describes a page worth of chunks.
162 *
163 * How many bits per u_short in the bitmap
164 */
165#define MALLOC_BITS		(NBBY * sizeof(u_short))
166struct chunk_info {
167	LIST_ENTRY(chunk_info) entries;
168	void *page;			/* pointer to the page */
169	u_int32_t canary;
170	u_short size;			/* size of this page's chunks */
171	u_short shift;			/* how far to shift for this size */
172	u_short free;			/* how many free chunks */
173	u_short total;			/* how many chunks */
174	u_short offset;			/* requested size table offset */
175					/* which chunks are free */
176	u_short bits[1];
177};
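/*
 * Example (illustrative sketch, assuming 4 KB pages and 16-byte minimum
 * chunks): a page of 64-byte chunks holds 4096 >> 6 = 64 chunks, so its
 * bitmap needs howmany(64, MALLOC_BITS) = 4 u_shorts; with chunk canaries
 * enabled, alloc_chunk_info() reserves one extra u_short per chunk (at
 * bits[offset + chunk]) to record each chunk's requested size.
 */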
178
179struct malloc_readonly {
180	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
181	int	malloc_mt;		/* multi-threaded mode? */
182	int	malloc_freecheck;	/* Extensive double free check */
183	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
184	int	malloc_junk;		/* junk fill? */
185	int	malloc_realloc;		/* always realloc? */
186	int	malloc_xmalloc;		/* xmalloc behaviour? */
187	int	chunk_canaries;		/* use canaries after chunks? */
188	int	internal_funcs;		/* use better recallocarray/freezero? */
189	u_int	malloc_cache;		/* free pages we cache */
190	size_t	malloc_guard;		/* use guard pages after allocations? */
191#ifdef MALLOC_STATS
192	int	malloc_stats;		/* dump statistics at end */
193#endif
194	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
195};
196
197/* This object is mapped PROT_READ after initialisation to prevent tampering */
198static union {
199	struct malloc_readonly mopts;
200	u_char _pad[MALLOC_PAGESIZE];
201} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
202#define mopts	malloc_readonly.mopts
203
204char		*malloc_options;	/* compile-time options */
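/*
 * Usage sketch (illustrative, not part of this file): besides the
 * MALLOC_OPTIONS environment variable and the /etc/malloc.conf symlink
 * read by omalloc_init(), a program may select options itself by
 * assigning to malloc_options before its first allocation; "CFGJ" below
 * turns on chunk canaries, freecheck/freeunmap, guard pages and junking.
 *
 *	extern char *malloc_options;
 *
 *	int
 *	main(void)
 *	{
 *		malloc_options = "CFGJ";
 *		return malloc(1) == NULL;
 *	}
 */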
205
206static u_char getrbyte(struct dir_info *d);
207static __dead void wrterror(struct dir_info *d, char *msg, ...)
208    __attribute__((__format__ (printf, 2, 3)));
209static void fill_canary(char *ptr, size_t sz, size_t allocated);
210
211#ifdef MALLOC_STATS
212void malloc_dump(int, int, struct dir_info *);
213PROTO_NORMAL(malloc_dump);
214void malloc_gdump(int);
215PROTO_NORMAL(malloc_gdump);
216static void malloc_exit(void);
217#define CALLER	__builtin_return_address(0)
218#else
219#define CALLER	NULL
220#endif
221
222/* low bits of r->p determine the size: 0 means >= page size, with r->size
223 * holding the real size; otherwise they are a shift count, or 1 for malloc(0)
224 */
225#define REALSIZE(sz, r)						\
226	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
227	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
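/*
 * Example (illustrative sketch): a page of 64-byte chunks is entered into
 * the hash table with low bits 7 (shift 6 plus one, see
 * omalloc_make_chunks()), so REALSIZE yields 1 << (7 - 1) = 64; a
 * malloc(0) page is stored with low bits 1 and REALSIZE yields 0; a large
 * region has low bits 0 and REALSIZE returns r->size as recorded by
 * insert().
 */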
228
229static inline void
230_MALLOC_LEAVE(struct dir_info *d)
231{
232	if (mopts.malloc_mt) {
233		d->active--;
234		_MALLOC_UNLOCK(d->mutex);
235	}
236}
237
238static inline void
239_MALLOC_ENTER(struct dir_info *d)
240{
241	if (mopts.malloc_mt) {
242		_MALLOC_LOCK(d->mutex);
243		d->active++;
244	}
245}
246
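/*
 * The low MALLOC_PAGESHIFT bits are discarded before mixing: regions are
 * page aligned, so those bits carry no entropy, and discarding them lets
 * a pointer hash the same whether or not its low bits hold the size tag
 * described above for REALSIZE.
 */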
247static inline size_t
248hash(void *p)
249{
250	size_t sum;
251	uintptr_t u;
252
253	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
254	sum = u;
255	sum = (sum << 7) - sum + (u >> 16);
256#ifdef __LP64__
257	sum = (sum << 7) - sum + (u >> 32);
258	sum = (sum << 7) - sum + (u >> 48);
259#endif
260	return sum;
261}
262
263static inline
264struct dir_info *getpool(void)
265{
266	if (!mopts.malloc_mt)
267		return mopts.malloc_pool[0];
268	else
269		return mopts.malloc_pool[TIB_GET()->tib_tid &
270		    (_MALLOC_MUTEXES - 1)];
271}
272
273static __dead void
274wrterror(struct dir_info *d, char *msg, ...)
275{
276	int		saved_errno = errno;
277	va_list		ap;
278
279	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
280	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
281	va_start(ap, msg);
282	vdprintf(STDERR_FILENO, msg, ap);
283	va_end(ap);
284	dprintf(STDERR_FILENO, "\n");
285
286#ifdef MALLOC_STATS
287	if (mopts.malloc_stats)
288		malloc_gdump(STDERR_FILENO);
289#endif /* MALLOC_STATS */
290
291	errno = saved_errno;
292
293	abort();
294}
295
296static void
297rbytes_init(struct dir_info *d)
298{
299	arc4random_buf(d->rbytes, sizeof(d->rbytes));
300	/* add 1 to account for using d->rbytes[0] */
301	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
302}
303
304static inline u_char
305getrbyte(struct dir_info *d)
306{
307	u_char x;
308
309	if (d->rbytesused >= sizeof(d->rbytes))
310		rbytes_init(d);
311	x = d->rbytes[d->rbytesused++];
312	return x;
313}
314
315/*
316 * Cache maintenance. We keep at most malloc_cache pages cached.
317 * If the cache is becoming full, unmap pages in the cache for real,
318 * and then add the region to the cache.
319 * Unlike the regular region data structure, the sizes in the
320 * cache are in MALLOC_PAGESIZE units.
321 */
322static void
323unmap(struct dir_info *d, void *p, size_t sz, int clear)
324{
325	size_t psz = sz >> MALLOC_PAGESHIFT;
326	size_t rsz, tounmap;
327	struct region_info *r;
328	u_int i, offset;
329
330	if (sz != PAGEROUND(sz))
331		wrterror(d, "munmap round");
332
333	rsz = mopts.malloc_cache - d->free_regions_size;
334
335	/*
336	 * normally the cache holds recently freed regions, but if the region
337	 * to unmap is larger than the cache size or we're clearing and the
338	 * cache is full, just munmap
339	 */
340	if (psz > mopts.malloc_cache || (clear && rsz == 0)) {
341		i = munmap(p, sz);
342		if (i)
343			wrterror(d, "munmap %p", p);
344		STATS_SUB(d->malloc_used, sz);
345		return;
346	}
347	tounmap = 0;
348	if (psz > rsz)
349		tounmap = psz - rsz;
350	offset = getrbyte(d);
351	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
352		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
353		if (r->p != NULL) {
354			rsz = r->size << MALLOC_PAGESHIFT;
355			if (munmap(r->p, rsz))
356				wrterror(d, "munmap %p", r->p);
357			r->p = NULL;
358			if (tounmap > r->size)
359				tounmap -= r->size;
360			else
361				tounmap = 0;
362			d->free_regions_size -= r->size;
363			r->size = 0;
364			STATS_SUB(d->malloc_used, rsz);
365		}
366	}
367	if (tounmap > 0)
368		wrterror(d, "malloc cache underflow");
369	for (i = 0; i < mopts.malloc_cache; i++) {
370		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
371		if (r->p == NULL) {
372			if (clear)
373				memset(p, 0, sz - mopts.malloc_guard);
374			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
375				size_t amt = mopts.malloc_junk == 1 ?
376				    MALLOC_MAXCHUNK : sz;
377				memset(p, SOME_FREEJUNK, amt);
378			}
379			if (mopts.malloc_freeunmap)
380				mprotect(p, sz, PROT_NONE);
381			r->p = p;
382			r->size = psz;
383			d->free_regions_size += psz;
384			break;
385		}
386	}
387	if (i == mopts.malloc_cache)
388		wrterror(d, "malloc free slot lost");
389	if (d->free_regions_size > mopts.malloc_cache)
390		wrterror(d, "malloc cache overflow");
391}
392
393static void
394zapcacheregion(struct dir_info *d, void *p, size_t len)
395{
396	u_int i;
397	struct region_info *r;
398	size_t rsz;
399
400	for (i = 0; i < mopts.malloc_cache; i++) {
401		r = &d->free_regions[i];
402		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
403			rsz = r->size << MALLOC_PAGESHIFT;
404			if (munmap(r->p, rsz))
405				wrterror(d, "munmap %p", r->p);
406			r->p = NULL;
407			d->free_regions_size -= r->size;
408			r->size = 0;
409			STATS_SUB(d->malloc_used, rsz);
410		}
411	}
412}
413
414static void *
415map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
416{
417	size_t psz = sz >> MALLOC_PAGESHIFT;
418	struct region_info *r, *big = NULL;
419	u_int i, offset;
420	void *p;
421
422	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
423	    d->canary1 != ~d->canary2)
424		wrterror(d, "internal struct corrupt");
425	if (sz != PAGEROUND(sz))
426		wrterror(d, "map round");
427
428	if (!hint && psz > d->free_regions_size) {
429		_MALLOC_LEAVE(d);
430		p = MMAP(sz);
431		_MALLOC_ENTER(d);
432		if (p != MAP_FAILED)
433			STATS_ADD(d->malloc_used, sz);
434		/* zero fill not needed */
435		return p;
436	}
437	offset = getrbyte(d);
438	for (i = 0; i < mopts.malloc_cache; i++) {
439		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
440		if (r->p != NULL) {
441			if (hint && r->p != hint)
442				continue;
443			if (r->size == psz) {
444				p = r->p;
445				r->p = NULL;
446				r->size = 0;
447				d->free_regions_size -= psz;
448				if (mopts.malloc_freeunmap)
449					mprotect(p, sz, PROT_READ | PROT_WRITE);
450				if (zero_fill)
451					memset(p, 0, sz);
452				else if (mopts.malloc_junk == 2 &&
453				    mopts.malloc_freeunmap)
454					memset(p, SOME_FREEJUNK, sz);
455				return p;
456			} else if (r->size > psz)
457				big = r;
458		}
459	}
460	if (big != NULL) {
461		r = big;
462		p = r->p;
463		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
464		if (mopts.malloc_freeunmap)
465			mprotect(p, sz, PROT_READ | PROT_WRITE);
466		r->size -= psz;
467		d->free_regions_size -= psz;
468		if (zero_fill)
469			memset(p, 0, sz);
470		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
471			memset(p, SOME_FREEJUNK, sz);
472		return p;
473	}
474	if (hint)
475		return MAP_FAILED;
476	if (d->free_regions_size > mopts.malloc_cache)
477		wrterror(d, "malloc cache");
478	_MALLOC_LEAVE(d);
479	p = MMAP(sz);
480	_MALLOC_ENTER(d);
481	if (p != MAP_FAILED)
482		STATS_ADD(d->malloc_used, sz);
483	/* zero fill not needed */
484	return p;
485}
486
487static void
488omalloc_parseopt(char opt)
489{
490	switch (opt) {
491	case '>':
492		mopts.malloc_cache <<= 1;
493		if (mopts.malloc_cache > MALLOC_MAXCACHE)
494			mopts.malloc_cache = MALLOC_MAXCACHE;
495		break;
496	case '<':
497		mopts.malloc_cache >>= 1;
498		break;
499	case 'c':
500		mopts.chunk_canaries = 0;
501		break;
502	case 'C':
503		mopts.chunk_canaries = 1;
504		break;
505#ifdef MALLOC_STATS
506	case 'd':
507		mopts.malloc_stats = 0;
508		break;
509	case 'D':
510		mopts.malloc_stats = 1;
511		break;
512#endif /* MALLOC_STATS */
513	case 'f':
514		mopts.malloc_freecheck = 0;
515		mopts.malloc_freeunmap = 0;
516		break;
517	case 'F':
518		mopts.malloc_freecheck = 1;
519		mopts.malloc_freeunmap = 1;
520		break;
521	case 'g':
522		mopts.malloc_guard = 0;
523		break;
524	case 'G':
525		mopts.malloc_guard = MALLOC_PAGESIZE;
526		break;
527	case 'j':
528		if (mopts.malloc_junk > 0)
529			mopts.malloc_junk--;
530		break;
531	case 'J':
532		if (mopts.malloc_junk < 2)
533			mopts.malloc_junk++;
534		break;
535	case 'r':
536		mopts.malloc_realloc = 0;
537		break;
538	case 'R':
539		mopts.malloc_realloc = 1;
540		break;
541	case 'u':
542		mopts.malloc_freeunmap = 0;
543		break;
544	case 'U':
545		mopts.malloc_freeunmap = 1;
546		break;
547	case 'x':
548		mopts.malloc_xmalloc = 0;
549		break;
550	case 'X':
551		mopts.malloc_xmalloc = 1;
552		break;
553	default: {
554		dprintf(STDERR_FILENO, "malloc() warning: "
555		    "unknown char in MALLOC_OPTIONS\n");
556		break;
557	}
558	}
559}
560
561static void
562omalloc_init(void)
563{
564	char *p, *q, b[64];
565	int i, j;
566
567	/*
568	 * Default options
569	 */
570	mopts.malloc_junk = 1;
571	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
572
573	for (i = 0; i < 3; i++) {
574		switch (i) {
575		case 0:
576			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
577			if (j <= 0)
578				continue;
579			b[j] = '\0';
580			p = b;
581			break;
582		case 1:
583			if (issetugid() == 0)
584				p = getenv("MALLOC_OPTIONS");
585			else
586				continue;
587			break;
588		case 2:
589			p = malloc_options;
590			break;
591		default:
592			p = NULL;
593		}
594
595		for (; p != NULL && *p != '\0'; p++) {
596			switch (*p) {
597			case 'S':
598				for (q = "CFGJ"; *q != '\0'; q++)
599					omalloc_parseopt(*q);
600				mopts.malloc_cache = 0;
601				break;
602			case 's':
603				for (q = "cgj"; *q != '\0'; q++)
604					omalloc_parseopt(*q);
605				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
606				break;
607			default:
608				omalloc_parseopt(*p);
609				break;
610			}
611		}
612	}
613
614#ifdef MALLOC_STATS
615	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
616		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
617		    " Will not be able to dump stats on exit\n");
618	}
619#endif /* MALLOC_STATS */
620
621	while ((mopts.malloc_canary = arc4random()) == 0)
622		;
623}
624
625/*
626 * Initialize a dir_info, which should have been cleared by caller
627 */
628static void
629omalloc_poolinit(struct dir_info **dp)
630{
631	void *p;
632	size_t d_avail, regioninfo_size;
633	struct dir_info *d;
634	int i, j;
635
636	/*
637	 * Allocate dir_info with a guard page on either side. Also
638	 * randomise offset inside the page at which the dir_info
639	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
640	 */
641	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
642		wrterror(NULL, "malloc init mmap failed");
643	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
644	mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
645	    MALLOC_PAGESIZE, PROT_NONE);
646	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
647	d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE +
648	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
649
650	rbytes_init(d);
651	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
652	regioninfo_size = d->regions_total * sizeof(struct region_info);
653	d->r = MMAP(regioninfo_size);
654	if (d->r == MAP_FAILED) {
655		d->regions_total = 0;
656		wrterror(NULL, "malloc init mmap failed");
657	}
658	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
659		LIST_INIT(&d->chunk_info_list[i]);
660		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
661			LIST_INIT(&d->chunk_dir[i][j]);
662	}
663	STATS_ADD(d->malloc_used, regioninfo_size);
664	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
665	d->canary2 = ~d->canary1;
666
667	*dp = d;
668}
669
670static int
671omalloc_grow(struct dir_info *d)
672{
673	size_t newtotal;
674	size_t newsize;
675	size_t mask;
676	size_t i;
677	struct region_info *p;
678
679	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
680		return 1;
681
682	newtotal = d->regions_total * 2;
683	newsize = newtotal * sizeof(struct region_info);
684	mask = newtotal - 1;
685
686	p = MMAP(newsize);
687	if (p == MAP_FAILED)
688		return 1;
689
690	STATS_ADD(d->malloc_used, newsize);
691	STATS_ZERO(d->inserts);
692	STATS_ZERO(d->insert_collisions);
693	for (i = 0; i < d->regions_total; i++) {
694		void *q = d->r[i].p;
695		if (q != NULL) {
696			size_t index = hash(q) & mask;
697			STATS_INC(d->inserts);
698			while (p[index].p != NULL) {
699				index = (index - 1) & mask;
700				STATS_INC(d->insert_collisions);
701			}
702			p[index] = d->r[i];
703		}
704	}
705	/* avoid pages containing meta info ending up in the cache */
706	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
707		wrterror(d, "munmap %p", (void *)d->r);
708	else
709		STATS_SUB(d->malloc_used,
710		    d->regions_total * sizeof(struct region_info));
711	d->regions_free = d->regions_free + d->regions_total;
712	d->regions_total = newtotal;
713	d->r = p;
714	return 0;
715}
716
717static struct chunk_info *
718alloc_chunk_info(struct dir_info *d, int bits)
719{
720	struct chunk_info *p;
721	size_t size, count;
722
723	if (bits == 0)
724		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
725	else
726		count = MALLOC_PAGESIZE >> bits;
727
728	size = howmany(count, MALLOC_BITS);
729	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
730	if (mopts.chunk_canaries)
731		size += count * sizeof(u_short);
732	size = ALIGN(size);
733
734	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
735		char *q;
736		int i;
737
738		q = MMAP(MALLOC_PAGESIZE);
739		if (q == MAP_FAILED)
740			return NULL;
741		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
742		count = MALLOC_PAGESIZE / size;
743		for (i = 0; i < count; i++, q += size)
744			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
745			    (struct chunk_info *)q, entries);
746	}
747	p = LIST_FIRST(&d->chunk_info_list[bits]);
748	LIST_REMOVE(p, entries);
749	memset(p, 0, size);
750	p->canary = d->canary1;
751	return p;
752}
753
754
755/*
756 * The hashtable uses the assumption that p is never NULL. This holds since
757 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
758 */
759static int
760insert(struct dir_info *d, void *p, size_t sz, void *f)
761{
762	size_t index;
763	size_t mask;
764	void *q;
765
766	if (d->regions_free * 4 < d->regions_total) {
767		if (omalloc_grow(d))
768			return 1;
769	}
770	mask = d->regions_total - 1;
771	index = hash(p) & mask;
772	q = d->r[index].p;
773	STATS_INC(d->inserts);
774	while (q != NULL) {
775		index = (index - 1) & mask;
776		q = d->r[index].p;
777		STATS_INC(d->insert_collisions);
778	}
779	d->r[index].p = p;
780	d->r[index].size = sz;
781#ifdef MALLOC_STATS
782	d->r[index].f = f;
783#endif
784	d->regions_free--;
785	return 0;
786}
787
788static struct region_info *
789find(struct dir_info *d, void *p)
790{
791	size_t index;
792	size_t mask = d->regions_total - 1;
793	void *q, *r;
794
795	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
796	    d->canary1 != ~d->canary2)
797		wrterror(d, "internal struct corrupt");
798	p = MASK_POINTER(p);
799	index = hash(p) & mask;
800	r = d->r[index].p;
801	q = MASK_POINTER(r);
802	STATS_INC(d->finds);
803	while (q != p && r != NULL) {
804		index = (index - 1) & mask;
805		r = d->r[index].p;
806		q = MASK_POINTER(r);
807		STATS_INC(d->find_collisions);
808	}
809	return (q == p && r != NULL) ? &d->r[index] : NULL;
810}
811
812static void
813delete(struct dir_info *d, struct region_info *ri)
814{
815	/* algorithm R, Knuth Vol III section 6.4 */
816	size_t mask = d->regions_total - 1;
817	size_t i, j, r;
818
819	if (d->regions_total & (d->regions_total - 1))
820		wrterror(d, "regions_total not 2^x");
821	d->regions_free++;
822	STATS_INC(d->deletes);
823
824	i = ri - d->r;
825	for (;;) {
826		d->r[i].p = NULL;
827		d->r[i].size = 0;
828		j = i;
829		for (;;) {
830			i = (i - 1) & mask;
831			if (d->r[i].p == NULL)
832				return;
833			r = hash(d->r[i].p) & mask;
834			if ((i <= r && r < j) || (r < j && j < i) ||
835			    (j < i && i <= r))
836				continue;
837			d->r[j] = d->r[i];
838			STATS_INC(d->delete_moves);
839			break;
840		}
841
842	}
843}
844
845/*
846 * Allocate a page of chunks
847 */
848static struct chunk_info *
849omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
850{
851	struct chunk_info *bp;
852	void		*pp;
853	int		i, k;
854
855	/* Allocate a new bucket */
856	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
857	if (pp == MAP_FAILED)
858		return NULL;
859
860	bp = alloc_chunk_info(d, bits);
861	if (bp == NULL) {
862		unmap(d, pp, MALLOC_PAGESIZE, 0);
863		return NULL;
864	}
865
866	/* memory protect the page allocated in the malloc(0) case */
867	if (bits == 0) {
868		bp->size = 0;
869		bp->shift = 1;
870		i = MALLOC_MINSIZE - 1;
871		while (i >>= 1)
872			bp->shift++;
873		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
874		bp->offset = 0xdead;
875		bp->page = pp;
876
877		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
878		if (k < 0) {
879			unmap(d, pp, MALLOC_PAGESIZE, 0);
880			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
881			return NULL;
882		}
883	} else {
884		bp->size = 1U << bits;
885		bp->shift = bits;
886		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
887		bp->offset = howmany(bp->total, MALLOC_BITS);
888		bp->page = pp;
889	}
890
891	/* set all valid bits in the bitmap */
892	k = bp->total;
893	i = 0;
894
895	/* Do a bunch at a time */
896	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
897		bp->bits[i / MALLOC_BITS] = (u_short)~0U;
898
899	for (; i < k; i++)
900		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
901
902	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);
903
904	bits++;
905	if ((uintptr_t)pp & bits)
906		wrterror(d, "pp & bits %p", pp);
907
908	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
909	return bp;
910}
911
912static int
913find_chunksize(size_t size)
914{
915	int		i, j;
916
917	/* Don't bother with anything less than this */
918	/* unless we have a malloc(0) request */
919	if (size != 0 && size < MALLOC_MINSIZE)
920		size = MALLOC_MINSIZE;
921
922	/* Find the right bucket */
923	if (size == 0)
924		j = 0;
925	else {
926		j = MALLOC_MINSHIFT;
927		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
928		while (i >>= 1)
929			j++;
930	}
931	return j;
932}
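/*
 * Example (illustrative sketch): find_chunksize(100) sets i = 99 >> 3 = 12
 * and halves it until it reaches zero, bumping j three times, so
 * j = MALLOC_MINSHIFT + 3 = 7 and the request is served from 128-byte
 * chunks; find_chunksize(16) yields 4 (16-byte chunks) and
 * find_chunksize(0) returns bucket 0, which is reserved for malloc(0).
 */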
933
934/*
935 * Allocate a chunk
936 */
937static void *
938malloc_bytes(struct dir_info *d, size_t size, void *f)
939{
940	int		i, j, listnum;
941	size_t		k;
942	u_short		u, *lp;
943	struct chunk_info *bp;
944
945	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
946	    d->canary1 != ~d->canary2)
947		wrterror(d, "internal struct corrupt");
948
949	j = find_chunksize(size);
950
951	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
952	/* If it's empty, allocate another page of chunks of that size */
953	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
954		bp = omalloc_make_chunks(d, j, listnum);
955		if (bp == NULL)
956			return NULL;
957	}
958
959	if (bp->canary != d->canary1)
960		wrterror(d, "chunk info corrupted");
961
962	i = d->chunk_start;
963	if (bp->free > 1)
964		i += getrbyte(d);
965	if (i >= bp->total)
966		i &= bp->total - 1;
967	for (;;) {
968		for (;;) {
969			lp = &bp->bits[i / MALLOC_BITS];
970			if (!*lp) {
971				i += MALLOC_BITS;
972				i &= ~(MALLOC_BITS - 1);
973				if (i >= bp->total)
974					i = 0;
975			} else
976				break;
977		}
978		k = i % MALLOC_BITS;
979		u = 1 << k;
980		if (*lp & u)
981			break;
982		if (++i >= bp->total)
983			i = 0;
984	}
985	d->chunk_start += i + 1;
986#ifdef MALLOC_STATS
987	if (i == 0) {
988		struct region_info *r = find(d, bp->page);
989		r->f = f;
990	}
991#endif
992
993	*lp ^= u;
994
995	/* If there are no more free chunks, remove from the free-list */
996	if (!--bp->free)
997		LIST_REMOVE(bp, entries);
998
999	/* Adjust to the real offset of that chunk */
1000	k += (lp - bp->bits) * MALLOC_BITS;
1001
1002	if (mopts.chunk_canaries && size > 0)
1003		bp->bits[bp->offset + k] = size;
1004
1005	k <<= bp->shift;
1006
1007	if (bp->size > 0) {
1008		if (mopts.malloc_junk == 2)
1009			memset((char *)bp->page + k, SOME_JUNK, bp->size);
1010		else if (mopts.chunk_canaries)
1011			fill_canary((char *)bp->page + k, size, bp->size);
1012	}
1013	return ((char *)bp->page + k);
1014}
1015
1016static void
1017fill_canary(char *ptr, size_t sz, size_t allocated)
1018{
1019	size_t check_sz = allocated - sz;
1020
1021	if (check_sz > CHUNK_CHECK_LENGTH)
1022		check_sz = CHUNK_CHECK_LENGTH;
1023	memset(ptr + sz, SOME_JUNK, check_sz);
1024}
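/*
 * Example (illustrative sketch): a 20-byte request placed in a 32-byte
 * chunk with canaries enabled calls fill_canary(ptr, 20, 32), writing
 * SOME_JUNK over bytes 20..31; validate_canary() later re-checks at most
 * CHUNK_CHECK_LENGTH of those bytes when the chunk is freed.
 */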
1025
1026static void
1027validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
1028{
1029	size_t check_sz = allocated - sz;
1030	u_char *p, *q;
1031
1032	if (check_sz > CHUNK_CHECK_LENGTH)
1033		check_sz = CHUNK_CHECK_LENGTH;
1034	p = ptr + sz;
1035	q = p + check_sz;
1036
1037	while (p < q) {
1038		if (*p != SOME_JUNK) {
1039			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
1040			    ptr, p - ptr, sz, *p == SOME_FREEJUNK ?
1041			        " (double free?)" : "");
1042		}
1043		p++;
1044	}
1045}
1046
1047static uint32_t
1048find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
1049{
1050	struct chunk_info *info;
1051	uint32_t chunknum;
1052
1053	info = (struct chunk_info *)r->size;
1054	if (info->canary != d->canary1)
1055		wrterror(d, "chunk info corrupted");
1056
1057	/* Find the chunk number on the page */
1058	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1059
1060	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
1061		wrterror(d, "modified chunk-pointer %p", ptr);
1062	if (info->bits[chunknum / MALLOC_BITS] &
1063	    (1U << (chunknum % MALLOC_BITS)))
1064		wrterror(d, "chunk is already free %p", ptr);
1065	if (check && info->size > 0) {
1066		validate_canary(d, ptr, info->bits[info->offset + chunknum],
1067		    info->size);
1068	}
1069	return chunknum;
1070}
1071
1072/*
1073 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1074 */
1075static void
1076free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1077{
1078	struct chunk_head *mp;
1079	struct chunk_info *info;
1080	uint32_t chunknum;
1081	int listnum;
1082
1083	info = (struct chunk_info *)r->size;
1084	chunknum = find_chunknum(d, r, ptr, 0);
1085
1086	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
1087	info->free++;
1088
1089	if (info->free == 1) {
1090		/* Page became non-full */
1091		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
1092		if (info->size != 0)
1093			mp = &d->chunk_dir[info->shift][listnum];
1094		else
1095			mp = &d->chunk_dir[0][listnum];
1096
1097		LIST_INSERT_HEAD(mp, info, entries);
1098		return;
1099	}
1100
1101	if (info->free != info->total)
1102		return;
1103
1104	LIST_REMOVE(info, entries);
1105
1106	if (info->size == 0 && !mopts.malloc_freeunmap)
1107		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1108	unmap(d, info->page, MALLOC_PAGESIZE, 0);
1109
1110	delete(d, r);
1111	if (info->size != 0)
1112		mp = &d->chunk_info_list[info->shift];
1113	else
1114		mp = &d->chunk_info_list[0];
1115	LIST_INSERT_HEAD(mp, info, entries);
1116}
1117
1118
1119
1120static void *
1121omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
1122{
1123	void *p;
1124	size_t psz;
1125
1126	if (sz > MALLOC_MAXCHUNK) {
1127		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1128			errno = ENOMEM;
1129			return NULL;
1130		}
1131		sz += mopts.malloc_guard;
1132		psz = PAGEROUND(sz);
1133		p = map(pool, NULL, psz, zero_fill);
1134		if (p == MAP_FAILED) {
1135			errno = ENOMEM;
1136			return NULL;
1137		}
1138		if (insert(pool, p, sz, f)) {
1139			unmap(pool, p, psz, 0);
1140			errno = ENOMEM;
1141			return NULL;
1142		}
1143		if (mopts.malloc_guard) {
1144			if (mprotect((char *)p + psz - mopts.malloc_guard,
1145			    mopts.malloc_guard, PROT_NONE))
1146				wrterror(pool, "mprotect");
1147			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
1148		}
1149
1150		if (MALLOC_MOVE_COND(sz)) {
1151			/* fill whole allocation */
1152			if (mopts.malloc_junk == 2)
1153				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1154			/* shift towards the end */
1155			p = MALLOC_MOVE(p, sz);
1156			/* re-zero if requested, since the junk fill above overwrote it */
1157			if (zero_fill && mopts.malloc_junk == 2)
1158				memset(p, 0, sz - mopts.malloc_guard);
1159		} else {
1160			if (mopts.malloc_junk == 2) {
1161				if (zero_fill)
1162					memset((char *)p + sz - mopts.malloc_guard,
1163					    SOME_JUNK, psz - sz);
1164				else
1165					memset(p, SOME_JUNK,
1166					    psz - mopts.malloc_guard);
1167			}
1168			else if (mopts.chunk_canaries)
1169				fill_canary(p, sz - mopts.malloc_guard,
1170				    psz - mopts.malloc_guard);
1171		}
1172
1173	} else {
1174		/* takes care of SOME_JUNK */
1175		p = malloc_bytes(pool, sz, f);
1176		if (zero_fill && p != NULL && sz > 0)
1177			memset(p, 0, sz);
1178	}
1179
1180	return p;
1181}
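/*
 * Example (illustrative sketch, assuming 4 KB pages and the 'G' option):
 * omalloc() for a 5000-byte request adds the guard (sz = 9096), rounds up
 * to psz = 12288 (three pages), maps them and mprotects the last page
 * PROT_NONE; MALLOC_MOVE_COND(9096) is false because 5000 bytes do not
 * fit in a single page, so the returned pointer is not shifted towards
 * the end of the mapping.
 */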
1182
1183/*
1184 * Common function for handling recursion.  Only
1185 * print the error message once, to avoid making the problem
1186 * potentially worse.
1187 */
1188static void
1189malloc_recurse(struct dir_info *d)
1190{
1191	static int noprint;
1192
1193	if (noprint == 0) {
1194		noprint = 1;
1195		wrterror(d, "recursive call");
1196	}
1197	d->active--;
1198	_MALLOC_UNLOCK(d->mutex);
1199	errno = EDEADLK;
1200}
1201
1202void
1203_malloc_init(int from_rthreads)
1204{
1205	int i, max;
1206	struct dir_info *d;
1207
1208	_MALLOC_LOCK(0);
1209	if (!from_rthreads && mopts.malloc_pool[0]) {
1210		_MALLOC_UNLOCK(0);
1211		return;
1212	}
1213	if (!mopts.malloc_canary)
1214		omalloc_init();
1215
1216	max = from_rthreads ? _MALLOC_MUTEXES : 1;
1217	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1218		mprotect(&malloc_readonly, sizeof(malloc_readonly),
1219		     PROT_READ | PROT_WRITE);
1220	for (i = 0; i < max; i++) {
1221		if (mopts.malloc_pool[i])
1222			continue;
1223		omalloc_poolinit(&d);
1224		d->mutex = i;
1225		mopts.malloc_pool[i] = d;
1226	}
1227
1228	if (from_rthreads)
1229		mopts.malloc_mt = 1;
1230	else
1231		mopts.internal_funcs = 1;
1232
1233	/*
1234	 * Options have been set and will never be reset.
1235	 * Prevent further tampering with them.
1236	 */
1237	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
1238		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
1239	_MALLOC_UNLOCK(0);
1240}
1241DEF_STRONG(_malloc_init);
1242
1243void *
1244malloc(size_t size)
1245{
1246	void *r;
1247	struct dir_info *d;
1248	int saved_errno = errno;
1249
1250	d = getpool();
1251	if (d == NULL) {
1252		_malloc_init(0);
1253		d = getpool();
1254	}
1255	_MALLOC_LOCK(d->mutex);
1256	d->func = "malloc";
1257
1258	if (d->active++) {
1259		malloc_recurse(d);
1260		return NULL;
1261	}
1262	r = omalloc(d, size, 0, CALLER);
1263	d->active--;
1264	_MALLOC_UNLOCK(d->mutex);
1265	if (r == NULL && mopts.malloc_xmalloc)
1266		wrterror(d, "out of memory");
1267	if (r != NULL)
1268		errno = saved_errno;
1269	return r;
1270}
1271/*DEF_STRONG(malloc);*/
1272
1273static void
1274validate_junk(struct dir_info *pool, void *p)
1275{
1276	struct region_info *r;
1277	size_t byte, sz;
1278
1279	if (p == NULL)
1280		return;
1281	r = find(pool, p);
1282	if (r == NULL)
1283		wrterror(pool, "bogus pointer in validate_junk %p", p);
1284	REALSIZE(sz, r);
1285	if (sz > CHUNK_CHECK_LENGTH)
1286		sz = CHUNK_CHECK_LENGTH;
1287	for (byte = 0; byte < sz; byte++) {
1288		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
1289			wrterror(pool, "use after free %p", p);
1290	}
1291}
1292
1293static void
1294ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
1295{
1296	struct dir_info *pool;
1297	struct region_info *r;
1298	size_t sz;
1299	int i;
1300
1301	pool = argpool;
1302	r = find(pool, p);
1303	if (r == NULL) {
1304		if (mopts.malloc_mt)  {
1305			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1306				if (i == argpool->mutex)
1307					continue;
1308				pool->active--;
1309				_MALLOC_UNLOCK(pool->mutex);
1310				pool = mopts.malloc_pool[i];
1311				_MALLOC_LOCK(pool->mutex);
1312				pool->active++;
1313				r = find(pool, p);
1314				if (r != NULL)
1315					break;
1316			}
1317		}
1318		if (r == NULL)
1319			wrterror(pool, "bogus pointer (double free?) %p", p);
1320	}
1321
1322	REALSIZE(sz, r);
1323	if (check) {
1324		if (sz <= MALLOC_MAXCHUNK) {
1325			if (mopts.chunk_canaries && sz > 0) {
1326				struct chunk_info *info =
1327				    (struct chunk_info *)r->size;
1328				uint32_t chunknum =
1329				    find_chunknum(pool, r, p, 0);
1330
1331				if (info->bits[info->offset + chunknum] <
1332				    argsz)
1333					wrterror(pool, "recorded size %hu"
1334					    " < %zu",
1335					    info->bits[info->offset + chunknum],
1336					    argsz);
1337			} else {
1338				if (sz < argsz)
1339					wrterror(pool, "chunk size %zu < %zu",
1340					    sz, argsz);
1341			}
1342		} else if (sz - mopts.malloc_guard < argsz) {
1343			wrterror(pool, "recorded size %zu < %zu",
1344			    sz - mopts.malloc_guard, argsz);
1345		}
1346	}
1347	if (sz > MALLOC_MAXCHUNK) {
1348		if (!MALLOC_MOVE_COND(sz)) {
1349			if (r->p != p)
1350				wrterror(pool, "bogus pointer %p", p);
1351			if (mopts.chunk_canaries)
1352				validate_canary(pool, p,
1353				    sz - mopts.malloc_guard,
1354				    PAGEROUND(sz - mopts.malloc_guard));
1355		} else {
1356			/* shifted towards the end */
1357			if (p != MALLOC_MOVE(r->p, sz))
1358				wrterror(pool, "bogus moved pointer %p", p);
1359			p = r->p;
1360		}
1361		if (mopts.malloc_guard) {
1362			if (sz < mopts.malloc_guard)
1363				wrterror(pool, "guard size");
1364			if (!mopts.malloc_freeunmap) {
1365				if (mprotect((char *)p + PAGEROUND(sz) -
1366				    mopts.malloc_guard, mopts.malloc_guard,
1367				    PROT_READ | PROT_WRITE))
1368					wrterror(pool, "mprotect");
1369			}
1370			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
1371		}
1372		unmap(pool, p, PAGEROUND(sz), clear);
1373		delete(pool, r);
1374	} else {
1375		/* Validate and optionally canary check */
1376		find_chunknum(pool, r, p, mopts.chunk_canaries);
1377		if (!clear) {
1378			void *tmp;
1379			int i;
1380
1381			if (mopts.malloc_freecheck) {
1382				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
1383					if (p == pool->delayed_chunks[i])
1384						wrterror(pool,
1385						    "double free %p", p);
1386			}
1387			if (mopts.malloc_junk && sz > 0)
1388				memset(p, SOME_FREEJUNK, sz);
1389			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1390			tmp = p;
1391			p = pool->delayed_chunks[i];
1392			if (tmp == p)
1393				wrterror(pool, "double free %p", tmp);
1394			pool->delayed_chunks[i] = tmp;
1395			if (mopts.malloc_junk)
1396				validate_junk(pool, p);
1397		} else if (sz > 0)
1398			memset(p, 0, sz);
1399		if (p != NULL) {
1400			r = find(pool, p);
1401			if (r == NULL)
1402				wrterror(pool,
1403				    "bogus pointer (double free?) %p", p);
1404			free_bytes(pool, r, p);
1405		}
1406	}
1407
1408	if (argpool != pool) {
1409		pool->active--;
1410		_MALLOC_UNLOCK(pool->mutex);
1411		_MALLOC_LOCK(argpool->mutex);
1412		argpool->active++;
1413	}
1414}
1415
1416void
1417free(void *ptr)
1418{
1419	struct dir_info *d;
1420	int saved_errno = errno;
1421
1422	/* This is legal. */
1423	if (ptr == NULL)
1424		return;
1425
1426	d = getpool();
1427	if (d == NULL)
1428		wrterror(d, "free() called before allocation");
1429	_MALLOC_LOCK(d->mutex);
1430	d->func = "free";
1431	if (d->active++) {
1432		malloc_recurse(d);
1433		return;
1434	}
1435	ofree(d, ptr, 0, 0, 0);
1436	d->active--;
1437	_MALLOC_UNLOCK(d->mutex);
1438	errno = saved_errno;
1439}
1440/*DEF_STRONG(free);*/
1441
1442static void
1443freezero_p(void *ptr, size_t sz)
1444{
1445	explicit_bzero(ptr, sz);
1446	free(ptr);
1447}
1448
1449void
1450freezero(void *ptr, size_t sz)
1451{
1452	struct dir_info *d;
1453	int saved_errno = errno;
1454
1455	/* This is legal. */
1456	if (ptr == NULL)
1457		return;
1458
1459	if (!mopts.internal_funcs)
1460		return freezero_p(ptr, sz);
1461
1462	d = getpool();
1463	if (d == NULL)
1464		wrterror(d, "freezero() called before allocation");
1465	_MALLOC_LOCK(d->mutex);
1466	d->func = "freezero";
1467	if (d->active++) {
1468		malloc_recurse(d);
1469		return;
1470	}
1471	ofree(d, ptr, 1, 1, sz);
1472	d->active--;
1473	_MALLOC_UNLOCK(d->mutex);
1474	errno = saved_errno;
1475}
1476DEF_WEAK(freezero);
1477
1478static void *
1479orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f)
1480{
1481	struct dir_info *pool;
1482	struct region_info *r;
1483	struct chunk_info *info;
1484	size_t oldsz, goldsz, gnewsz;
1485	void *q, *ret;
1486	int i;
1487	uint32_t chunknum;
1488
1489	pool = argpool;
1490
1491	if (p == NULL)
1492		return omalloc(pool, newsz, 0, f);
1493
1494	r = find(pool, p);
1495	if (r == NULL) {
1496		if (mopts.malloc_mt) {
1497			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1498				if (i == argpool->mutex)
1499					continue;
1500				pool->active--;
1501				_MALLOC_UNLOCK(pool->mutex);
1502				pool = mopts.malloc_pool[i];
1503				_MALLOC_LOCK(pool->mutex);
1504				pool->active++;
1505				r = find(pool, p);
1506				if (r != NULL)
1507					break;
1508			}
1509		}
1510		if (r == NULL)
1511			wrterror(pool, "bogus pointer (double free?) %p", p);
1512	}
1513	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1514		errno = ENOMEM;
1515		ret = NULL;
1516		goto done;
1517	}
1518
1519	REALSIZE(oldsz, r);
1520	if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) {
1521		chunknum = find_chunknum(pool, r, p, 0);
1522		info = (struct chunk_info *)r->size;
1523	}
1524
1525	goldsz = oldsz;
1526	if (oldsz > MALLOC_MAXCHUNK) {
1527		if (oldsz < mopts.malloc_guard)
1528			wrterror(pool, "guard size");
1529		oldsz -= mopts.malloc_guard;
1530	}
1531
1532	gnewsz = newsz;
1533	if (gnewsz > MALLOC_MAXCHUNK)
1534		gnewsz += mopts.malloc_guard;
1535
1536	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK &&
1537	    !mopts.malloc_realloc) {
1538		/* First case: from an n-page sized allocation to an m-page
1539		   sized allocation, m > n */
1540		size_t roldsz = PAGEROUND(goldsz);
1541		size_t rnewsz = PAGEROUND(gnewsz);
1542
1543		if (rnewsz > roldsz) {
1544			/* try to extend existing region */
1545			if (!mopts.malloc_guard) {
1546				void *hint = (char *)r->p + roldsz;
1547				size_t needed = rnewsz - roldsz;
1548
1549				STATS_INC(pool->cheap_realloc_tries);
1550				q = map(pool, hint, needed, 0);
1551				if (q == hint)
1552					goto gotit;
1553				zapcacheregion(pool, hint, needed);
1554				q = MQUERY(hint, needed);
1555				if (q == hint)
1556					q = MMAPA(hint, needed);
1557				else
1558					q = MAP_FAILED;
1559				if (q == hint) {
1560gotit:
1561					STATS_ADD(pool->malloc_used, needed);
1562					if (mopts.malloc_junk == 2)
1563						memset(q, SOME_JUNK, needed);
1564					r->size = gnewsz;
1565					if (r->p != p) {
1566						/* old pointer is moved */
1567						memmove(r->p, p, oldsz);
1568						p = r->p;
1569					}
1570					if (mopts.chunk_canaries)
1571						fill_canary(p, newsz,
1572						    PAGEROUND(newsz));
1573					STATS_SETF(r, f);
1574					STATS_INC(pool->cheap_reallocs);
1575					ret = p;
1576					goto done;
1577				} else if (q != MAP_FAILED) {
1578					if (munmap(q, needed))
1579						wrterror(pool, "munmap %p", q);
1580				}
1581			}
1582		} else if (rnewsz < roldsz) {
1583			/* shrink number of pages */
1584			if (mopts.malloc_guard) {
1585				if (mprotect((char *)r->p + roldsz -
1586				    mopts.malloc_guard, mopts.malloc_guard,
1587				    PROT_READ | PROT_WRITE))
1588					wrterror(pool, "mprotect");
1589				if (mprotect((char *)r->p + rnewsz -
1590				    mopts.malloc_guard, mopts.malloc_guard,
1591				    PROT_NONE))
1592					wrterror(pool, "mprotect");
1593			}
1594			unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0);
1595			r->size = gnewsz;
1596			if (MALLOC_MOVE_COND(gnewsz)) {
1597				void *pp = MALLOC_MOVE(r->p, gnewsz);
1598				memmove(pp, p, newsz);
1599				p = pp;
1600			} else if (mopts.chunk_canaries)
1601				fill_canary(p, newsz, PAGEROUND(newsz));
1602			STATS_SETF(r, f);
1603			ret = p;
1604			goto done;
1605		} else {
1606			/* number of pages remains the same */
1607			void *pp = r->p;
1608
1609			r->size = gnewsz;
1610			if (MALLOC_MOVE_COND(gnewsz))
1611				pp = MALLOC_MOVE(r->p, gnewsz);
1612			if (p != pp) {
1613				memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1614				p = pp;
1615			}
1616			if (p == r->p) {
1617				if (newsz > oldsz && mopts.malloc_junk == 2)
1618					memset((char *)p + newsz, SOME_JUNK,
1619					    rnewsz - mopts.malloc_guard -
1620					    newsz);
1621				if (mopts.chunk_canaries)
1622					fill_canary(p, newsz, PAGEROUND(newsz));
1623			}
1624			STATS_SETF(r, f);
1625			ret = p;
1626			goto done;
1627		}
1628	}
1629	if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1630	    newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1631	    1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) {
1632		/* do not reallocate if the new size fits well in the existing chunk */
1633		if (mopts.malloc_junk == 2)
1634			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1635		if (mopts.chunk_canaries) {
1636			info->bits[info->offset + chunknum] = newsz;
1637			fill_canary(p, newsz, info->size);
1638		}
1639		STATS_SETF(r, f);
1640		ret = p;
1641	} else if (newsz != oldsz || mopts.malloc_realloc) {
1642		/* create new allocation */
1643		q = omalloc(pool, newsz, 0, f);
1644		if (q == NULL) {
1645			ret = NULL;
1646			goto done;
1647		}
1648		if (newsz != 0 && oldsz != 0)
1649			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1650		ofree(pool, p, 0, 0, 0);
1651		ret = q;
1652	} else {
1653		/* oldsz == newsz */
1654		if (newsz != 0)
1655			wrterror(pool, "realloc internal inconsistency");
1656		STATS_SETF(r, f);
1657		ret = p;
1658	}
1659done:
1660	if (argpool != pool) {
1661		pool->active--;
1662		_MALLOC_UNLOCK(pool->mutex);
1663		_MALLOC_LOCK(argpool->mutex);
1664		argpool->active++;
1665	}
1666	return ret;
1667}
1668
1669void *
1670realloc(void *ptr, size_t size)
1671{
1672	struct dir_info *d;
1673	void *r;
1674	int saved_errno = errno;
1675
1676	d = getpool();
1677	if (d == NULL) {
1678		_malloc_init(0);
1679		d = getpool();
1680	}
1681	_MALLOC_LOCK(d->mutex);
1682	d->func = "realloc";
1683	if (d->active++) {
1684		malloc_recurse(d);
1685		return NULL;
1686	}
1687	r = orealloc(d, ptr, size, CALLER);
1688
1689	d->active--;
1690	_MALLOC_UNLOCK(d->mutex);
1691	if (r == NULL && mopts.malloc_xmalloc)
1692		wrterror(d, "out of memory");
1693	if (r != NULL)
1694		errno = saved_errno;
1695	return r;
1696}
1697/*DEF_STRONG(realloc);*/
1698
1699
1700/*
1701 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1702 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1703 */
1704#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
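/*
 * Example (illustrative sketch, assuming a 64-bit size_t): MUL_NO_OVERFLOW
 * is 1UL << 32, so calloc() only pays for the SIZE_MAX / nmemb division
 * when one of the operands is at least 2^32; if both are below that bound
 * the product is at most (2^32 - 1)^2 < 2^64 and cannot overflow.
 */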
1705
1706void *
1707calloc(size_t nmemb, size_t size)
1708{
1709	struct dir_info *d;
1710	void *r;
1711	int saved_errno = errno;
1712
1713	d = getpool();
1714	if (d == NULL) {
1715		_malloc_init(0);
1716		d = getpool();
1717	}
1718	_MALLOC_LOCK(d->mutex);
1719	d->func = "calloc";
1720	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1721	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1722		_MALLOC_UNLOCK(d->mutex);
1723		if (mopts.malloc_xmalloc)
1724			wrterror(d, "out of memory");
1725		errno = ENOMEM;
1726		return NULL;
1727	}
1728
1729	if (d->active++) {
1730		malloc_recurse(d);
1731		return NULL;
1732	}
1733
1734	size *= nmemb;
1735	r = omalloc(d, size, 1, CALLER);
1736
1737	d->active--;
1738	_MALLOC_UNLOCK(d->mutex);
1739	if (r == NULL && mopts.malloc_xmalloc)
1740		wrterror(d, "out of memory");
1741	if (r != NULL)
1742		errno = saved_errno;
1743	return r;
1744}
1745/*DEF_STRONG(calloc);*/
1746
1747static void *
1748orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
1749    size_t newsize, void *f)
1750{
1751	struct dir_info *pool;
1752	struct region_info *r;
1753	void *newptr;
1754	size_t sz;
1755	int i;
1756
1757	pool = argpool;
1758
1759	if (p == NULL)
1760		return omalloc(pool, newsize, 1, f);
1761
1762	if (oldsize == newsize)
1763		return p;
1764
1765	r = find(pool, p);
1766	if (r == NULL) {
1767		if (mopts.malloc_mt) {
1768			for (i = 0; i < _MALLOC_MUTEXES; i++) {
1769				if (i == argpool->mutex)
1770					continue;
1771				pool->active--;
1772				_MALLOC_UNLOCK(pool->mutex);
1773				pool = mopts.malloc_pool[i];
1774				_MALLOC_LOCK(pool->mutex);
1775				pool->active++;
1776				r = find(pool, p);
1777				if (r != NULL)
1778					break;
1779			}
1780		}
1781		if (r == NULL)
1782			wrterror(pool, "bogus pointer (double free?) %p", p);
1783	}
1784
1785	REALSIZE(sz, r);
1786	if (sz <= MALLOC_MAXCHUNK) {
1787		if (mopts.chunk_canaries && sz > 0) {
1788			struct chunk_info *info = (struct chunk_info *)r->size;
1789			uint32_t chunknum = find_chunknum(pool, r, p, 0);
1790
1791			if (info->bits[info->offset + chunknum] != oldsize)
1792				wrterror(pool, "recorded old size %hu != %zu",
1793				    info->bits[info->offset + chunknum],
1794				    oldsize);
1795		}
1796	} else if (oldsize != sz - mopts.malloc_guard)
1797		wrterror(pool, "recorded old size %zu != %zu",
1798		    sz - mopts.malloc_guard, oldsize);
1799
1800	newptr = omalloc(pool, newsize, 0, f);
1801	if (newptr == NULL)
1802		goto done;
1803
1804	if (newsize > oldsize) {
1805		memcpy(newptr, p, oldsize);
1806		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1807	} else
1808		memcpy(newptr, p, newsize);
1809
1810	ofree(pool, p, 1, 0, 0);
1811
1812done:
1813	if (argpool != pool) {
1814		pool->active--;
1815		_MALLOC_UNLOCK(pool->mutex);
1816		_MALLOC_LOCK(argpool->mutex);
1817		argpool->active++;
1818	}
1819
1820	return newptr;
1821}
1822
1823static void *
1824recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1825{
1826	size_t oldsize, newsize;
1827	void *newptr;
1828
1829	if (ptr == NULL)
1830		return calloc(newnmemb, size);
1831
1832	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1833	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1834		errno = ENOMEM;
1835		return NULL;
1836	}
1837	newsize = newnmemb * size;
1838
1839	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1840	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1841		errno = EINVAL;
1842		return NULL;
1843	}
1844	oldsize = oldnmemb * size;
1845
1846	/*
1847	 * Don't bother too much if we're shrinking just a bit;
1848	 * we do not shrink for a series of small steps, oh well.
1849	 */
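	/*
	 * Example (illustrative sketch): shrinking a 1000-byte allocation to
	 * 900 bytes leaves d = 100, less than half the old size and less than
	 * a page, so the code below just zeroes the 100-byte tail and returns
	 * the original pointer instead of allocating a new block.
	 */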
1850	if (newsize <= oldsize) {
1851		size_t d = oldsize - newsize;
1852
1853		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
1854			memset((char *)ptr + newsize, 0, d);
1855			return ptr;
1856		}
1857	}
1858
1859	newptr = malloc(newsize);
1860	if (newptr == NULL)
1861		return NULL;
1862
1863	if (newsize > oldsize) {
1864		memcpy(newptr, ptr, oldsize);
1865		memset((char *)newptr + oldsize, 0, newsize - oldsize);
1866	} else
1867		memcpy(newptr, ptr, newsize);
1868
1869	explicit_bzero(ptr, oldsize);
1870	free(ptr);
1871
1872	return newptr;
1873}
1874
1875void *
1876recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
1877{
1878	struct dir_info *d;
1879	size_t oldsize = 0, newsize;
1880	void *r;
1881	int saved_errno = errno;
1882
1883	if (!mopts.internal_funcs)
1884		return recallocarray_p(ptr, oldnmemb, newnmemb, size);
1885
1886	d = getpool();
1887	if (d == NULL) {
1888		_malloc_init(0);
1889		d = getpool();
1890	}
1891
1892	_MALLOC_LOCK(d->mutex);
1893	d->func = "recallocarray";
1894
1895	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1896	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
1897		_MALLOC_UNLOCK(d->mutex);
1898		if (mopts.malloc_xmalloc)
1899			wrterror(d, "out of memory");
1900		errno = ENOMEM;
1901		return NULL;
1902	}
1903	newsize = newnmemb * size;
1904
1905	if (ptr != NULL) {
1906		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1907		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
1908			_MALLOC_UNLOCK(d->mutex);
1909			errno = EINVAL;
1910			return NULL;
1911		}
1912		oldsize = oldnmemb * size;
1913	}
1914
1915	if (d->active++) {
1916		malloc_recurse(d);
1917		return NULL;
1918	}
1919
1920	r = orecallocarray(d, ptr, oldsize, newsize, CALLER);
1921
1922	d->active--;
1923	_MALLOC_UNLOCK(d->mutex);
1924	if (r == NULL && mopts.malloc_xmalloc)
1925		wrterror(d, "out of memory");
1926	if (r != NULL)
1927		errno = saved_errno;
1928	return r;
1929}
1930DEF_WEAK(recallocarray);
1931
1932
1933static void *
1934mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
1935{
1936	char *p, *q;
1937
1938	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
1939		wrterror(d, "mapalign bad alignment");
1940	if (sz != PAGEROUND(sz))
1941		wrterror(d, "mapalign round");
1942
1943	/* Allocate sz + alignment bytes of memory, which must include a
1944	 * subrange of sz bytes that is properly aligned.  Unmap the
1945	 * other bytes, and then return that subrange.
1946	 */
1947
1948	/* We need sz + alignment to fit into a size_t. */
1949	if (alignment > SIZE_MAX - sz)
1950		return MAP_FAILED;
1951
1952	p = map(d, NULL, sz + alignment, zero_fill);
1953	if (p == MAP_FAILED)
1954		return MAP_FAILED;
1955	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
1956	if (q != p) {
1957		if (munmap(p, q - p))
1958			wrterror(d, "munmap %p", p);
1959	}
1960	if (munmap(q + sz, alignment - (q - p)))
1961		wrterror(d, "munmap %p", q + sz);
1962	STATS_SUB(d->malloc_used, alignment);
1963
1964	return q;
1965}
1966
1967static void *
1968omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
1969    void *f)
1970{
1971	size_t psz;
1972	void *p;
1973
1974	/* If between half a page and a page, avoid MALLOC_MOVE. */
1975	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
1976		sz = MALLOC_PAGESIZE;
1977	if (alignment <= MALLOC_PAGESIZE) {
1978		/*
1979		 * max(size, alignment) is enough to assure the requested
1980		 * alignment, since the allocator always allocates
1981		 * power-of-two blocks.
1982		 */
1983		if (sz < alignment)
1984			sz = alignment;
1985		return omalloc(pool, sz, zero_fill, f);
1986	}
1987
1988	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1989		errno = ENOMEM;
1990		return NULL;
1991	}
1992
1993	sz += mopts.malloc_guard;
1994	psz = PAGEROUND(sz);
1995
1996	p = mapalign(pool, alignment, psz, zero_fill);
1997	if (p == MAP_FAILED) {
1998		errno = ENOMEM;
1999		return NULL;
2000	}
2001
2002	if (insert(pool, p, sz, f)) {
2003		unmap(pool, p, psz, 0);
2004		errno = ENOMEM;
2005		return NULL;
2006	}
2007
2008	if (mopts.malloc_guard) {
2009		if (mprotect((char *)p + psz - mopts.malloc_guard,
2010		    mopts.malloc_guard, PROT_NONE))
2011			wrterror(pool, "mprotect");
2012		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2013	}
2014
2015	if (mopts.malloc_junk == 2) {
2016		if (zero_fill)
2017			memset((char *)p + sz - mopts.malloc_guard,
2018			    SOME_JUNK, psz - sz);
2019		else
2020			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2021	}
2022	else if (mopts.chunk_canaries)
2023		fill_canary(p, sz - mopts.malloc_guard,
2024		    psz - mopts.malloc_guard);
2025
2026	return p;
2027}
2028
2029int
2030posix_memalign(void **memptr, size_t alignment, size_t size)
2031{
2032	struct dir_info *d;
2033	int res, saved_errno = errno;
2034	void *r;
2035
2036	/* Make sure that alignment is a large enough power of 2. */
2037	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2038		return EINVAL;
2039
2040	d = getpool();
2041	if (d == NULL) {
2042		_malloc_init(0);
2043		d = getpool();
2044	}
2045	_MALLOC_LOCK(d->mutex);
2046	d->func = "posix_memalign";
2047	if (d->active++) {
2048		malloc_recurse(d);
2049		goto err;
2050	}
2051	r = omemalign(d, alignment, size, 0, CALLER);
2052	d->active--;
2053	_MALLOC_UNLOCK(d->mutex);
2054	if (r == NULL) {
2055		if (mopts.malloc_xmalloc)
2056			wrterror(d, "out of memory");
2057		goto err;
2058	}
2059	errno = saved_errno;
2060	*memptr = r;
2061	return 0;
2062
2063err:
2064	res = errno;
2065	errno = saved_errno;
2066	return res;
2067}
2068/*DEF_STRONG(posix_memalign);*/
2069
2070#ifdef MALLOC_STATS
2071
2072struct malloc_leak {
2073	void *f;
2074	size_t total_size;
2075	int count;
2076};
2077
2078struct leaknode {
2079	RBT_ENTRY(leaknode) entry;
2080	struct malloc_leak d;
2081};
2082
2083static inline int
2084leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2085{
2086	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2087}
2088
2089static RBT_HEAD(leaktree, leaknode) leakhead;
2090RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2091RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2092
2093static void
2094putleakinfo(void *f, size_t sz, int cnt)
2095{
2096	struct leaknode key, *p;
2097	static struct leaknode *page;
2098	static int used;
2099
2100	if (cnt == 0 || page == MAP_FAILED)
2101		return;
2102
2103	key.d.f = f;
2104	p = RBT_FIND(leaktree, &leakhead, &key);
2105	if (p == NULL) {
2106		if (page == NULL ||
2107		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2108			page = MMAP(MALLOC_PAGESIZE);
2109			if (page == MAP_FAILED)
2110				return;
2111			used = 0;
2112		}
2113		p = &page[used++];
2114		p->d.f = f;
2115		p->d.total_size = sz * cnt;
2116		p->d.count = cnt;
2117		RBT_INSERT(leaktree, &leakhead, p);
2118	} else {
2119		p->d.total_size += sz * cnt;
2120		p->d.count += cnt;
2121	}
2122}
2123
2124static struct malloc_leak *malloc_leaks;
2125
2126static void
2127dump_leaks(int fd)
2128{
2129	struct leaknode *p;
2130	int i = 0;
2131
2132	dprintf(fd, "Leak report\n");
2133	dprintf(fd, "                 f     sum      #    avg\n");
2134	/* XXX only one page of summary */
2135	if (malloc_leaks == NULL)
2136		malloc_leaks = MMAP(MALLOC_PAGESIZE);
2137	if (malloc_leaks != MAP_FAILED)
2138		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
2139	RBT_FOREACH(p, leaktree, &leakhead) {
2140		dprintf(fd, "%18p %7zu %6u %6zu\n", p->d.f,
2141		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
2142		if (malloc_leaks == MAP_FAILED ||
2143		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
2144			continue;
2145		malloc_leaks[i].f = p->d.f;
2146		malloc_leaks[i].total_size = p->d.total_size;
2147		malloc_leaks[i].count = p->d.count;
2148		i++;
2149	}
2150}
2151
2152static void
2153dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
2154{
2155	while (p != NULL) {
2156		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
2157		    p->page, ((p->bits[0] & 1) ? NULL : f),
2158		    p->size, p->free, p->total);
2159		if (!fromfreelist) {
2160			if (p->bits[0] & 1)
2161				putleakinfo(NULL, p->size, p->total - p->free);
2162			else {
2163				putleakinfo(f, p->size, 1);
2164				putleakinfo(NULL, p->size,
2165				    p->total - p->free - 1);
2166			}
2167			break;
2168		}
2169		p = LIST_NEXT(p, entries);
2170		if (p != NULL)
2171			dprintf(fd, "        ");
2172	}
2173}
2174
2175static void
2176dump_free_chunk_info(int fd, struct dir_info *d)
2177{
2178	int i, j, count;
2179	struct chunk_info *p;
2180
2181	dprintf(fd, "Free chunk structs:\n");
2182	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
2183		count = 0;
2184		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2185			count++;
2186		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2187			p = LIST_FIRST(&d->chunk_dir[i][j]);
2188			if (p == NULL && count == 0)
2189				continue;
2190			dprintf(fd, "%2d) %3d ", i, count);
2191			if (p != NULL)
2192				dump_chunk(fd, p, NULL, 1);
2193			else
2194				dprintf(fd, "\n");
2195		}
2196	}
2197
2198}
2199
2200static void
2201dump_free_page_info(int fd, struct dir_info *d)
2202{
2203	int i;
2204
2205	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
2206	for (i = 0; i < mopts.malloc_cache; i++) {
2207		if (d->free_regions[i].p != NULL) {
2208			dprintf(fd, "%2d) ", i);
2209			dprintf(fd, "free at %p: %zu\n",
2210			    d->free_regions[i].p, d->free_regions[i].size);
2211		}
2212	}
2213}
2214
2215static void
2216malloc_dump1(int fd, int poolno, struct dir_info *d)
2217{
2218	size_t i, realsize;
2219
2220	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2221	if (d == NULL)
2222		return;
2223	dprintf(fd, "Region slots free %zu/%zu\n",
2224		d->regions_free, d->regions_total);
2225	dprintf(fd, "Finds %zu/%zu\n", d->finds,
2226	    d->find_collisions);
2227	dprintf(fd, "Inserts %zu/%zu\n", d->inserts,
2228	    d->insert_collisions);
2229	dprintf(fd, "Deletes %zu/%zu\n", d->deletes,
2230	    d->delete_moves);
2231	dprintf(fd, "Cheap reallocs %zu/%zu\n",
2232	    d->cheap_reallocs, d->cheap_realloc_tries);
2233	dprintf(fd, "In use %zu\n", d->malloc_used);
2234	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
2235	dump_free_chunk_info(fd, d);
2236	dump_free_page_info(fd, d);
2237	dprintf(fd,
2238	    "slot)  hash d  type               page                  f size [free/n]\n");
2239	for (i = 0; i < d->regions_total; i++) {
2240		if (d->r[i].p != NULL) {
2241			size_t h = hash(d->r[i].p) &
2242			    (d->regions_total - 1);
2243			dprintf(fd, "%4zx) #%4zx %zd ",
2244			    i, h, h - i);
2245			REALSIZE(realsize, &d->r[i]);
2246			if (realsize > MALLOC_MAXCHUNK) {
2247				putleakinfo(d->r[i].f, realsize, 1);
2248				dprintf(fd,
2249				    "pages %18p %18p %zu\n", d->r[i].p,
2250				    d->r[i].f, realsize);
2251			} else
2252				dump_chunk(fd,
2253				    (struct chunk_info *)d->r[i].size,
2254				    d->r[i].f, 0);
2255		}
2256	}
2257	dump_leaks(fd);
2258	dprintf(fd, "\n");
2259}
2260
2261void
2262malloc_dump(int fd, int poolno, struct dir_info *pool)
2263{
2264	int i;
2265	void *p;
2266	struct region_info *r;
2267	int saved_errno = errno;
2268
2269	if (pool == NULL)
2270		return;
2271	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2272		p = pool->delayed_chunks[i];
2273		if (p == NULL)
2274			continue;
2275		r = find(pool, p);
2276		if (r == NULL)
2277			wrterror(pool, "bogus pointer in malloc_dump %p", p);
2278		free_bytes(pool, r, p);
2279		pool->delayed_chunks[i] = NULL;
2280	}
2281	/* XXX leak when run multiple times */
2282	RBT_INIT(leaktree, &leakhead);
2283	malloc_dump1(fd, poolno, pool);
2284	errno = saved_errno;
2285}
2286DEF_WEAK(malloc_dump);
2287
2288void
2289malloc_gdump(int fd)
2290{
2291	int i;
2292	int saved_errno = errno;
2293
2294	for (i = 0; i < _MALLOC_MUTEXES; i++)
2295		malloc_dump(fd, i, mopts.malloc_pool[i]);
2296
2297	errno = saved_errno;
2298}
2299DEF_WEAK(malloc_gdump);
2300
2301static void
2302malloc_exit(void)
2303{
2304	int save_errno = errno, fd, i;
2305
2306	fd = open("malloc.out", O_RDWR|O_APPEND);
2307	if (fd != -1) {
2308		dprintf(fd, "******** Start dump %s *******\n", __progname);
2309		dprintf(fd,
2310		    "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
2311		    mopts.malloc_mt, mopts.internal_funcs,
2312		    mopts.malloc_freecheck,
2313		    mopts.malloc_freeunmap, mopts.malloc_junk,
2314		    mopts.malloc_realloc, mopts.malloc_xmalloc,
2315		    mopts.chunk_canaries, mopts.malloc_cache,
2316		    mopts.malloc_guard);
2317
2318		for (i = 0; i < _MALLOC_MUTEXES; i++)
2319			malloc_dump(fd, i, mopts.malloc_pool[i]);
2320		dprintf(fd, "******** End dump %s *******\n", __progname);
2321		close(fd);
2322	} else
2323		dprintf(STDERR_FILENO,
2324		    "malloc() warning: Couldn't dump stats\n");
2325	errno = save_errno;
2326}
2327
2328#endif /* MALLOC_STATS */
2329