malloc.c revision 1.130
1/*	$OpenBSD: malloc.c,v 1.130 2011/05/05 12:11:20 otto Exp $	*/
2/*
3 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18/*
19 * Parts of this code, mainly the sub-page-sized chunk management code, are
20 * derived from the malloc implementation with the following license:
21 */
22/*
23 * ----------------------------------------------------------------------------
24 * "THE BEER-WARE LICENSE" (Revision 42):
25 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
26 * can do whatever you want with this stuff. If we meet some day, and you think
27 * this stuff is worth it, you can buy me a beer in return.  Poul-Henning Kamp
28 * ----------------------------------------------------------------------------
29 */
30
31/* #define MALLOC_STATS */
32
33#include <sys/types.h>
34#include <sys/param.h>
35#include <sys/queue.h>
36#include <sys/mman.h>
37#include <sys/uio.h>
38#include <errno.h>
39#include <stdint.h>
40#include <stdlib.h>
41#include <string.h>
42#include <stdio.h>
43#include <unistd.h>
44
45#ifdef MALLOC_STATS
46#include <fcntl.h>
47#endif
48
49#include "thread_private.h"
50
51#define MALLOC_MINSHIFT		4
52#define MALLOC_MAXSHIFT		16
53
54#if defined(__sparc__) && !defined(__sparcv9__)
55#define MALLOC_PAGESHIFT	(13U)
56#elif defined(__mips64__)
57#define MALLOC_PAGESHIFT	(14U)
58#else
59#define MALLOC_PAGESHIFT	(PGSHIFT)
60#endif
61
62#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
63#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
64#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
65#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))
66
67#define MALLOC_MAXCHUNK		(1 << (MALLOC_PAGESHIFT-1))
68#define MALLOC_MAXCACHE		256
69#define MALLOC_DELAYED_CHUNKS	15	/* max of getrnibble() */
70/*
71 * When the P option is active, we move allocations whose size is
72 * between half a page and a whole page towards the end of the page,
73 * subject to alignment constraints. This is the extra headroom we
74 * allow. Set to zero to be the most strict.
75 */
76#define MALLOC_LEEWAY		0
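/*
 * Worked example (an illustration, assuming the common 4096-byte
 * MALLOC_PAGESIZE, 16-byte MALLOC_MINSIZE and no guard page): a
 * 3000-byte allocation is placed at page offset
 * (4096 - MALLOC_LEEWAY - 3000) & ~(16 - 1) = 1088, so it ends 8 bytes
 * short of the page boundary; the rounding down to a multiple of
 * MALLOC_MINSIZE is the alignment constraint mentioned above, and a
 * non-zero MALLOC_LEEWAY would keep it further away from the end.
 */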
77
78#define PAGEROUND(x)  (((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)
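/*
 * E.g., with 4096-byte pages: PAGEROUND(1) == 4096,
 * PAGEROUND(4096) == 4096, PAGEROUND(4097) == 8192.
 */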
79
80/*
81 * What to use for Junk.  This is the byte value we use to fill with
82 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
83 * and SOME_FREEJUNK right before free.
84 */
85#define SOME_JUNK		0xd0	/* as in "Duh" :-) */
86#define SOME_FREEJUNK		0xdf
87
88#define MMAP(sz)	mmap(NULL, (size_t)(sz), PROT_READ | PROT_WRITE, \
89    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
90
91#define MMAPA(a,sz)	mmap((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
92    MAP_ANON | MAP_PRIVATE, -1, (off_t) 0)
93
94struct region_info {
95	void *p;		/* page; low bits used to mark chunks */
96	uintptr_t size;		/* size for pages, or chunk_info pointer */
97};
98
99LIST_HEAD(chunk_head, chunk_info);
100
101struct dir_info {
102	u_int32_t canary1;
103	struct region_info *r;		/* region slots */
104	size_t regions_total;		/* number of region slots */
105	size_t regions_bits;		/* log2 of total */
106	size_t regions_free;		/* number of free slots */
107					/* list of free chunk info structs */
108	struct chunk_head chunk_info_list;
109					/* lists of chunks with free slots */
110	struct chunk_head chunk_dir[MALLOC_MAXSHIFT];
111	size_t free_regions_size;	/* free pages cached */
112					/* free pages cache */
113	struct region_info free_regions[MALLOC_MAXCACHE];
114					/* delayed free chunk slots */
115	void *delayed_chunks[MALLOC_DELAYED_CHUNKS + 1];
116	u_short chunk_start;
117#ifdef MALLOC_STATS
118	size_t inserts;
119	size_t insert_collisions;
120	size_t finds;
121	size_t find_collisions;
122	size_t deletes;
123	size_t delete_moves;
124	size_t cheap_realloc_tries;
125	size_t cheap_reallocs;
126#define STATS_INC(x) ((x)++)
127#define STATS_ZERO(x) ((x) = 0)
128#else
129#define STATS_INC(x)	/* nothing */
130#define STATS_ZERO(x)	/* nothing */
131#endif /* MALLOC_STATS */
132	u_int32_t canary2;
133};
134#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
135			~MALLOC_PAGEMASK)
136
137/*
138 * This structure describes a page worth of chunks.
139 *
140 * How many bits per u_short in the bitmap
141 */
142#define MALLOC_BITS		(NBBY * sizeof(u_short))
143struct chunk_info {
144	LIST_ENTRY(chunk_info) entries;
145	void *page;			/* pointer to the page */
146	u_int32_t canary;
147	u_short size;			/* size of this page's chunks */
148	u_short shift;			/* how far to shift for this size */
149	u_short free;			/* how many free chunks */
150	u_short total;			/* how many chunks */
151					/* which chunks are free */
152	u_short bits[(MALLOC_PAGESIZE / MALLOC_MINSIZE) / MALLOC_BITS];
153};
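/*
 * Size check (assuming NBBY == 8, a 16-bit u_short and 4096-byte pages):
 * MALLOC_BITS is 16, a page holds at most 4096 / 16 == 256 minimum-size
 * chunks, so bits[] needs 256 / 16 == 16 u_shorts to track them all.
 */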
154
155struct malloc_readonly {
156	struct dir_info *g_pool;	/* Main bookkeeping information */
157	int	malloc_abort;		/* abort() on error */
158	int	malloc_freeprot;	/* mprotect free pages PROT_NONE? */
159	int	malloc_hint;		/* call madvise on free pages?  */
160	int	malloc_junk;		/* junk fill? */
161	int	malloc_move;		/* move allocations to end of page? */
162	int	malloc_realloc;		/* always realloc? */
163	int	malloc_xmalloc;		/* xmalloc behaviour? */
164	int	malloc_zero;		/* zero fill? */
165	size_t	malloc_guard;		/* use guard pages after allocations? */
166	u_int	malloc_cache;		/* free pages we cache */
167#ifdef MALLOC_STATS
168	int	malloc_stats;		/* dump statistics at end */
169#endif
170	u_int32_t malloc_canary;	/* Matched against ones in g_pool */
171};
172
173/* This object is mapped PROT_READ after initialisation to prevent tampering */
174static union {
175	struct malloc_readonly mopts;
176	u_char _pad[MALLOC_PAGESIZE];
177} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
178#define mopts	malloc_readonly.mopts
179#define g_pool	mopts.g_pool
180
181char		*malloc_options;	/* compile-time options */
182
183static char	*malloc_func;		/* current function */
184static int	malloc_active;		/* status of malloc */
185
186static size_t	malloc_guarded;		/* bytes used for guards */
187static size_t	malloc_used;		/* bytes allocated */
188
189static size_t rnibblesused;		/* random nibbles used */
190static u_char rbytes[512];		/* random bytes */
191static u_char getrnibble(void);
192
193extern char	*__progname;
194
195/* low bits of r->p determine size: 0 means >= page size and r->size holds
196 *  the real size, otherwise r->size is a shift count, or 1 for malloc(0)
197 */
198#define REALSIZE(sz, r) 					\
199	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
200	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
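/*
 * Example of the encoding (assuming 4096-byte pages): a region entry whose
 * key has low bits 0 describes a page-sized or larger region and r->size is
 * its byte size; low bits 1 mark a page of malloc(0) chunks, so REALSIZE
 * yields 0; any other value k stands for chunks of 1 << (k - 1) bytes,
 * e.g. k == 5 means 16-byte chunks (the key is built in
 * omalloc_make_chunks() as pp | (bits + 1)).
 */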
201
202static inline size_t
203hash(void *p)
204{
205	size_t sum;
206	union {
207		uintptr_t p;
208		unsigned short a[sizeof(void *) / sizeof(short)];
209	} u;
210	u.p = (uintptr_t)p >> MALLOC_PAGESHIFT;
211	sum = u.a[0];
212	sum = (sum << 7) - sum + u.a[1];
213#ifdef __LP64__
214	sum = (sum << 7) - sum + u.a[2];
215	sum = (sum << 7) - sum + u.a[3];
216#endif
217	return sum;
218}
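/*
 * Note: (sum << 7) - sum is sum * 127, so this is a small multiplicative
 * hash over the page number, mixed 16 bits at a time.  The page offset is
 * shifted out first, so every pointer within a page hashes to the same
 * slot, which is what find() relies on after MASK_POINTER().
 */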
219
220#ifdef MALLOC_STATS
221static void
222dump_chunk(int fd, struct chunk_info *p, int fromfreelist)
223{
224	char buf[64];
225
226	while (p != NULL) {
227		snprintf(buf, sizeof(buf), "chunk %d %d/%d %p\n", p->size,
228		    p->free, p->total, p->page);
229		write(fd, buf, strlen(buf));
230		if (!fromfreelist)
231			break;
232		p = LIST_NEXT(p, entries);
233		if (p != NULL) {
234			snprintf(buf, sizeof(buf), "    ");
235			write(fd, buf, strlen(buf));
236		}
237	}
238}
239
240static void
241dump_free_chunk_info(int fd, struct dir_info *d)
242{
243	char buf[64];
244	int i;
245
246	snprintf(buf, sizeof(buf), "Free chunk structs:\n");
247	write(fd, buf, strlen(buf));
248	for (i = 0; i < MALLOC_MAXSHIFT; i++) {
249		struct chunk_info *p = LIST_FIRST(&d->chunk_dir[i]);
250		if (p != NULL) {
251			snprintf(buf, sizeof(buf), "%2d) ", i);
252			write(fd, buf, strlen(buf));
253			dump_chunk(fd, p, 1);
254		}
255	}
256
257}
258
259static void
260dump_free_page_info(int fd, struct dir_info *d)
261{
262	char buf[64];
263	int i;
264
265	snprintf(buf, sizeof(buf), "Free pages cached: %zu\n",
266	    d->free_regions_size);
267	write(fd, buf, strlen(buf));
268	for (i = 0; i < mopts.malloc_cache; i++) {
269		if (d->free_regions[i].p != NULL) {
270			snprintf(buf, sizeof(buf), "%2d) ", i);
271			write(fd, buf, strlen(buf));
272			snprintf(buf, sizeof(buf), "free at %p: %zu\n",
273			    d->free_regions[i].p, d->free_regions[i].size);
274			write(fd, buf, strlen(buf));
275		}
276	}
277}
278
279static void
280malloc_dump1(int fd, struct dir_info *d)
281{
282	char buf[64];
283	size_t i, realsize;
284
285	snprintf(buf, sizeof(buf), "Malloc dir of %s at %p\n", __progname, d);
286	write(fd, buf, strlen(buf));
287	if (d == NULL)
288		return;
289	snprintf(buf, sizeof(buf), "Regions slots %zu\n", d->regions_total);
290	write(fd, buf, strlen(buf));
291	snprintf(buf, sizeof(buf), "Finds %zu/%zu %f\n", d->finds,
292	    d->find_collisions,
293	    1.0 + (double)d->find_collisions / d->finds);
294	write(fd, buf, strlen(buf));
295	snprintf(buf, sizeof(buf), "Inserts %zu/%zu %f\n", d->inserts,
296	    d->insert_collisions,
297	    1.0 + (double)d->insert_collisions / d->inserts);
298	write(fd, buf, strlen(buf));
299	snprintf(buf, sizeof(buf), "Deletes %zu/%zu\n", d->deletes,
300	     d->delete_moves);
301	write(fd, buf, strlen(buf));
302	snprintf(buf, sizeof(buf), "Cheap reallocs %zu/%zu\n",
303	    d->cheap_reallocs, d->cheap_realloc_tries);
304	write(fd, buf, strlen(buf));
305	snprintf(buf, sizeof(buf), "Regions slots free %zu\n", d->regions_free);
306	write(fd, buf, strlen(buf));
307	for (i = 0; i < d->regions_total; i++) {
308		if (d->r[i].p != NULL) {
309			size_t h = hash(d->r[i].p) &
310			    (d->regions_total - 1);
311			snprintf(buf, sizeof(buf), "%4zx) #%zx %zd ",
312			    i, h, h - i);
313			write(fd, buf, strlen(buf));
314			REALSIZE(realsize, &d->r[i]);
315			if (realsize > MALLOC_MAXCHUNK) {
316				snprintf(buf, sizeof(buf),
317				    "%p: %zu\n", d->r[i].p, realsize);
318				write(fd, buf, strlen(buf));
319			} else
320				dump_chunk(fd,
321				    (struct chunk_info *)d->r[i].size, 0);
322		}
323	}
324	dump_free_chunk_info(fd, d);
325	dump_free_page_info(fd, d);
326	snprintf(buf, sizeof(buf), "In use %zu\n", malloc_used);
327	write(fd, buf, strlen(buf));
328	snprintf(buf, sizeof(buf), "Guarded %zu\n", malloc_guarded);
329	write(fd, buf, strlen(buf));
330}
331
332
333void
334malloc_dump(int fd)
335{
336	malloc_dump1(fd, g_pool);
337}
338
339static void
340malloc_exit(void)
341{
342	static const char q[] = "malloc() warning: Couldn't dump stats\n";
343	int save_errno = errno, fd;
344
345	fd = open("malloc.out", O_RDWR|O_APPEND);
346	if (fd != -1) {
347		malloc_dump(fd);
348		close(fd);
349	} else
350		write(STDERR_FILENO, q, sizeof(q) - 1);
351	errno = save_errno;
352}
353#endif /* MALLOC_STATS */
354
355
356static void
357wrterror(char *msg, void *p)
358{
359	char		*q = " error: ";
360	struct iovec	iov[6];
361	char		buf[20];
362
363	iov[0].iov_base = __progname;
364	iov[0].iov_len = strlen(__progname);
365	iov[1].iov_base = malloc_func;
366	iov[1].iov_len = strlen(malloc_func);
367	iov[2].iov_base = q;
368	iov[2].iov_len = strlen(q);
369	iov[3].iov_base = msg;
370	iov[3].iov_len = strlen(msg);
371	iov[4].iov_base = buf;
372	if (p == NULL)
373		iov[4].iov_len = 0;
374	else {
375		snprintf(buf, sizeof(buf), " %p", p);
376		iov[4].iov_len = strlen(buf);
377	}
378	iov[5].iov_base = "\n";
379	iov[5].iov_len = 1;
380	writev(STDERR_FILENO, iov, 6);
381
382#ifdef MALLOC_STATS
383	if (mopts.malloc_stats)
384		malloc_dump(STDERR_FILENO);
385#endif /* MALLOC_STATS */
386	//malloc_active--;
387	if (mopts.malloc_abort)
388		abort();
389}
390
391static void
392rbytes_init(void)
393{
394	arc4random_buf(rbytes, sizeof(rbytes));
395	rnibblesused = 0;
396}
397
398static inline u_char
399getrnibble(void)
400{
401	u_char x;
402
403	if (rnibblesused >= 2 * sizeof(rbytes))
404		rbytes_init();
405	x = rbytes[rnibblesused++ / 2];
406	return (rnibblesused & 1 ? x & 0xf : x >> 4);
407}
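/*
 * Example: if rbytes[0] happens to be 0xd3, the first call returns 0x3
 * (the low nibble), the second returns 0xd (the high nibble), and so on
 * through the buffer; after 2 * sizeof(rbytes) nibbles have been handed
 * out, the buffer is refilled from arc4random_buf().
 */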
408
409/*
410 * Cache maintenance. We keep at most malloc_cache pages cached.
411 * If the cache is becoming full, unmap pages in the cache for real,
412 * and then add the region to the cache.
413 * Unlike the regular region data structure, the sizes in the
414 * cache are in MALLOC_PAGESIZE units.
415 */
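/*
 * For instance, with the default cache of 64 pages and 61 of them already
 * occupied, unmapping a 5-page region first munmap()s cached regions
 * (starting at a random index) until at least 2 pages' worth are really
 * gone, then parks the new region in a free slot, optionally madvise()d
 * and/or mprotect()ed PROT_NONE.
 */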
416static void
417unmap(struct dir_info *d, void *p, size_t sz)
418{
419	size_t psz = sz >> MALLOC_PAGESHIFT;
420	size_t rsz, tounmap;
421	struct region_info *r;
422	u_int i, offset;
423
424	if (sz != PAGEROUND(sz)) {
425		wrterror("munmap round", NULL);
426		return;
427	}
428
429	if (psz > mopts.malloc_cache) {
430		if (munmap(p, sz))
431			wrterror("munmap", p);
432		malloc_used -= sz;
433		return;
434	}
435	tounmap = 0;
436	rsz = mopts.malloc_cache - d->free_regions_size;
437	if (psz > rsz)
438		tounmap = psz - rsz;
439	offset = getrnibble();
440	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
441		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
442		if (r->p != NULL) {
443			rsz = r->size << MALLOC_PAGESHIFT;
444			if (munmap(r->p, rsz))
445				wrterror("munmap", r->p);
446			r->p = NULL;
447			if (tounmap > r->size)
448				tounmap -= r->size;
449			else
450				tounmap = 0;
451			d->free_regions_size -= r->size;
452			r->size = 0;
453			malloc_used -= rsz;
454		}
455	}
456	if (tounmap > 0)
457		wrterror("malloc cache underflow", NULL);
458	for (i = 0; i < mopts.malloc_cache; i++) {
459		r = &d->free_regions[i];
460		if (r->p == NULL) {
461			if (mopts.malloc_hint)
462				madvise(p, sz, MADV_FREE);
463			if (mopts.malloc_freeprot)
464				mprotect(p, sz, PROT_NONE);
465			r->p = p;
466			r->size = psz;
467			d->free_regions_size += psz;
468			break;
469		}
470	}
471	if (i == mopts.malloc_cache)
472		wrterror("malloc free slot lost", NULL);
473	if (d->free_regions_size > mopts.malloc_cache)
474		wrterror("malloc cache overflow", NULL);
475}
476
477static void
478zapcacheregion(struct dir_info *d, void *p)
479{
480	u_int i;
481	struct region_info *r;
482	size_t rsz;
483
484	for (i = 0; i < mopts.malloc_cache; i++) {
485		r = &d->free_regions[i];
486		if (r->p == p) {
487			rsz = r->size << MALLOC_PAGESHIFT;
488			if (munmap(r->p, rsz))
489				wrterror("munmap", r->p);
490			r->p = NULL;
491			d->free_regions_size -= r->size;
492			r->size = 0;
493			malloc_used -= rsz;
494		}
495	}
496}
497
498static void *
499map(struct dir_info *d, size_t sz, int zero_fill)
500{
501	size_t psz = sz >> MALLOC_PAGESHIFT;
502	struct region_info *r, *big = NULL;
503	u_int i, offset;
504	void *p;
505
506	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
507	    d->canary1 != ~d->canary2)
508		wrterror("internal struct corrupt", NULL);
509	if (sz != PAGEROUND(sz)) {
510		wrterror("map round", NULL);
511		return NULL;
512	}
513	if (psz > d->free_regions_size) {
514		p = MMAP(sz);
515		if (p != MAP_FAILED)
516			malloc_used += sz;
517		/* zero fill not needed */
518		return p;
519	}
520	offset = getrnibble();
521	for (i = 0; i < mopts.malloc_cache; i++) {
522		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
523		if (r->p != NULL) {
524			if (r->size == psz) {
525				p = r->p;
526				if (mopts.malloc_freeprot)
527					mprotect(p, sz, PROT_READ | PROT_WRITE);
528				if (mopts.malloc_hint)
529					madvise(p, sz, MADV_NORMAL);
530				r->p = NULL;
531				r->size = 0;
532				d->free_regions_size -= psz;
533				if (zero_fill)
534					memset(p, 0, sz);
535				else if (mopts.malloc_junk &&
536				    mopts.malloc_freeprot)
537					memset(p, SOME_FREEJUNK, sz);
538				return p;
539			} else if (r->size > psz)
540				big = r;
541		}
542	}
543	if (big != NULL) {
544		r = big;
545		p = (char *)r->p + ((r->size - psz) << MALLOC_PAGESHIFT);
546		if (mopts.malloc_freeprot)
547			mprotect(p, sz, PROT_READ | PROT_WRITE);
548		if (mopts.malloc_hint)
549			madvise(p, sz, MADV_NORMAL);
550		r->size -= psz;
551		d->free_regions_size -= psz;
552		if (zero_fill)
553			memset(p, 0, sz);
554		else if (mopts.malloc_junk && mopts.malloc_freeprot)
555			memset(p, SOME_FREEJUNK, sz);
556		return p;
557	}
558	p = MMAP(sz);
559	if (p != MAP_FAILED)
560		malloc_used += sz;
561	if (d->free_regions_size > mopts.malloc_cache)
562		wrterror("malloc cache", NULL);
563	/* zero fill not needed */
564	return p;
565}
566
567/*
568 * Initialize a dir_info, which should have been cleared by caller
569 */
570static int
571omalloc_init(struct dir_info **dp)
572{
573	char *p, b[64];
574	int i, j;
575	size_t d_avail, regioninfo_size;
576	struct dir_info *d;
577
578	rbytes_init();
579
580	/*
581	 * Default options
582	 */
583	mopts.malloc_abort = 1;
584	mopts.malloc_move = 1;
585	mopts.malloc_cache = 64;
586
587	for (i = 0; i < 3; i++) {
588		switch (i) {
589		case 0:
590			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
591			if (j <= 0)
592				continue;
593			b[j] = '\0';
594			p = b;
595			break;
596		case 1:
597			if (issetugid() == 0)
598				p = getenv("MALLOC_OPTIONS");
599			else
600				continue;
601			break;
602		case 2:
603			p = malloc_options;
604			break;
605		default:
606			p = NULL;
607		}
608
609		for (; p != NULL && *p != '\0'; p++) {
610			switch (*p) {
611			case '>':
612				mopts.malloc_cache <<= 1;
613				if (mopts.malloc_cache > MALLOC_MAXCACHE)
614					mopts.malloc_cache = MALLOC_MAXCACHE;
615				break;
616			case '<':
617				mopts.malloc_cache >>= 1;
618				break;
619			case 'a':
620				mopts.malloc_abort = 0;
621				break;
622			case 'A':
623				mopts.malloc_abort = 1;
624				break;
625#ifdef MALLOC_STATS
626			case 'd':
627				mopts.malloc_stats = 0;
628				break;
629			case 'D':
630				mopts.malloc_stats = 1;
631				break;
632#endif /* MALLOC_STATS */
633			case 'f':
634				mopts.malloc_freeprot = 0;
635				break;
636			case 'F':
637				mopts.malloc_freeprot = 1;
638				break;
639			case 'g':
640				mopts.malloc_guard = 0;
641				break;
642			case 'G':
643				mopts.malloc_guard = MALLOC_PAGESIZE;
644				break;
645			case 'h':
646				mopts.malloc_hint = 0;
647				break;
648			case 'H':
649				mopts.malloc_hint = 1;
650				break;
651			case 'j':
652				mopts.malloc_junk = 0;
653				break;
654			case 'J':
655				mopts.malloc_junk = 1;
656				break;
657			case 'n':
658			case 'N':
659				break;
660			case 'p':
661				mopts.malloc_move = 0;
662				break;
663			case 'P':
664				mopts.malloc_move = 1;
665				break;
666			case 'r':
667				mopts.malloc_realloc = 0;
668				break;
669			case 'R':
670				mopts.malloc_realloc = 1;
671				break;
672			case 'S':
673				mopts.malloc_freeprot = mopts.malloc_junk = 1;
674				mopts.malloc_guard = MALLOC_PAGESIZE;
675				break;
676			case 'x':
677				mopts.malloc_xmalloc = 0;
678				break;
679			case 'X':
680				mopts.malloc_xmalloc = 1;
681				break;
682			case 'z':
683				mopts.malloc_zero = 0;
684				break;
685			case 'Z':
686				mopts.malloc_zero = 1;
687				break;
688			default: {
689				static const char q[] = "malloc() warning: "
690				    "unknown char in MALLOC_OPTIONS\n";
691				write(STDERR_FILENO, q, sizeof(q) - 1);
692				break;
693			}
694			}
695		}
696	}
697
698	/*
699	 * We want junk in the entire allocation, and zero only in the part
700	 * the user asked for.
701	 */
702	if (mopts.malloc_zero)
703		mopts.malloc_junk = 1;
704
705#ifdef MALLOC_STATS
706	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
707		static const char q[] = "malloc() warning: atexit(2) failed."
708		    " Will not be able to dump stats on exit\n";
709		write(STDERR_FILENO, q, sizeof(q) - 1);
710	}
711#endif /* MALLOC_STATS */
712
713	while ((mopts.malloc_canary = arc4random()) == 0)
714		;
715
716	/*
717	 * Allocate dir_info with a guard page on either side. Also
718	 * randomise offset inside the page at which the dir_info
719	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
720	 */
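	/*
	 * Concretely: DIR_INFO_RSZ is sizeof(struct dir_info) rounded up to
	 * whole pages, the mapping below is that plus two guard pages (the
	 * first and last page, mprotect()ed PROT_NONE), and d_avail counts
	 * the MALLOC_MINSIZE-aligned start offsets inside the middle pages
	 * that still leave room for the struct; one of them is picked at
	 * random.
	 */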
721	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
722		return -1;
723	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
724	mprotect(p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
725	    MALLOC_PAGESIZE, PROT_NONE);
726	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
727	d = (struct dir_info *)(p + MALLOC_PAGESIZE +
728	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
729
730	d->regions_bits = 9;
731	d->regions_free = d->regions_total = 1 << d->regions_bits;
732	regioninfo_size = d->regions_total * sizeof(struct region_info);
733	d->r = MMAP(regioninfo_size);
734	if (d->r == MAP_FAILED) {
735		wrterror("malloc init mmap failed", NULL);
736		d->regions_total = 0;
737		return 1;
738	}
739	LIST_INIT(&d->chunk_info_list);
740	for (i = 0; i < MALLOC_MAXSHIFT; i++)
741		LIST_INIT(&d->chunk_dir[i]);
742	malloc_used += regioninfo_size;
743	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
744	d->canary2 = ~d->canary1;
745
746	*dp = d;
747
748	/*
749	 * Options have been set and will never be reset.
750	 * Prevent further tampering with them.
751	 */
752	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
753		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
754
755	return 0;
756}
757
758static int
759omalloc_grow(struct dir_info *d)
760{
761	size_t newbits;
762	size_t newtotal;
763	size_t newsize;
764	size_t mask;
765	size_t i;
766	struct region_info *p;
767
768	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2 )
769		return 1;
770
771	newbits = d->regions_bits + 1;
772	newtotal = d->regions_total * 2;
773	newsize = newtotal * sizeof(struct region_info);
774	mask = newtotal - 1;
775
776	p = MMAP(newsize);
777	if (p == MAP_FAILED)
778		return 1;
779
780	malloc_used += newsize;
781	memset(p, 0, newsize);
782	STATS_ZERO(d->inserts);
783	STATS_ZERO(d->insert_collisions);
784	for (i = 0; i < d->regions_total; i++) {
785		void *q = d->r[i].p;
786		if (q != NULL) {
787			size_t index = hash(q) & mask;
788			STATS_INC(d->inserts);
789			while (p[index].p != NULL) {
790				index = (index - 1) & mask;
791				STATS_INC(d->insert_collisions);
792			}
793			p[index] = d->r[i];
794		}
795	}
796	/* keep pages containing meta info from ending up in the cache */
797	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
798		wrterror("munmap", d->r);
799	else
800		malloc_used -= d->regions_total * sizeof(struct region_info);
801	d->regions_free = d->regions_free + d->regions_total;
802	d->regions_total = newtotal;
803	d->regions_bits = newbits;
804	d->r = p;
805	return 0;
806}
807
808static struct chunk_info *
809alloc_chunk_info(struct dir_info *d)
810{
811	struct chunk_info *p;
812	int i;
813
814	if (LIST_EMPTY(&d->chunk_info_list)) {
815		p = MMAP(MALLOC_PAGESIZE);
816		if (p == MAP_FAILED)
817			return NULL;
818		malloc_used += MALLOC_PAGESIZE;
819		for (i = 0; i < MALLOC_PAGESIZE / sizeof(*p); i++)
820			LIST_INSERT_HEAD(&d->chunk_info_list, &p[i], entries);
821	}
822	p = LIST_FIRST(&d->chunk_info_list);
823	LIST_REMOVE(p, entries);
824	memset(p, 0, sizeof *p);
825	p->canary = d->canary1;
826	return p;
827}
828
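/*
 * The region table d->r is an open-addressed hash table whose size is
 * always a power of two.  Probing runs downwards (index = (index - 1) &
 * mask) from hash(p), and insert() doubles the table via omalloc_grow()
 * once fewer than a quarter of the slots are free.
 */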
829static int
830insert(struct dir_info *d, void *p, size_t sz)
831{
832	size_t index;
833	size_t mask;
834	void *q;
835
836	if (d->regions_free * 4 < d->regions_total) {
837		if (omalloc_grow(d))
838			return 1;
839	}
840	mask = d->regions_total - 1;
841	index = hash(p) & mask;
842	q = d->r[index].p;
843	STATS_INC(d->inserts);
844	while (q != NULL) {
845		index = (index - 1) & mask;
846		q = d->r[index].p;
847		STATS_INC(d->insert_collisions);
848	}
849	d->r[index].p = p;
850	d->r[index].size = sz;
851	d->regions_free--;
852	return 0;
853}
854
855static struct region_info *
856find(struct dir_info *d, void *p)
857{
858	size_t index;
859	size_t mask = d->regions_total - 1;
860	void *q, *r;
861
862	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
863	    d->canary1 != ~d->canary2)
864		wrterror("internal struct corrupt", NULL);
865	p = MASK_POINTER(p);
866	index = hash(p) & mask;
867	r = d->r[index].p;
868	q = MASK_POINTER(r);
869	STATS_INC(d->finds);
870	while (q != p && r != NULL) {
871		index = (index - 1) & mask;
872		r = d->r[index].p;
873		q = MASK_POINTER(r);
874		STATS_INC(d->find_collisions);
875	}
876	return q == p ? &d->r[index] : NULL;
877}
878
879static void
880delete(struct dir_info *d, struct region_info *ri)
881{
882	/* algorithm R, Knuth Vol III section 6.4 */
883	size_t mask = d->regions_total - 1;
884	size_t i, j, r;
885
886	if (d->regions_total & (d->regions_total - 1))
887		wrterror("regions_total not 2^x", NULL);
888	d->regions_free++;
889	STATS_INC(g_pool->deletes);
890
891	i = ri - d->r;
892	for (;;) {
893		d->r[i].p = NULL;
894		d->r[i].size = 0;
895		j = i;
896		for (;;) {
897			i = (i - 1) & mask;
898			if (d->r[i].p == NULL)
899				return;
900			r = hash(d->r[i].p) & mask;
901			if ((i <= r && r < j) || (r < j && j < i) ||
902			    (j < i && i <= r))
903				continue;
904			d->r[j] = d->r[i];
905			STATS_INC(g_pool->delete_moves);
906			break;
907		}
908
909	}
910}
911
912/*
913 * Allocate a page of chunks
914 */
915static struct chunk_info *
916omalloc_make_chunks(struct dir_info *d, int bits)
917{
918	struct chunk_info *bp;
919	void		*pp;
920	long		i, k;
921
922	/* Allocate a new bucket */
923	pp = map(d, MALLOC_PAGESIZE, 0);
924	if (pp == MAP_FAILED)
925		return NULL;
926
927	bp = alloc_chunk_info(d);
928	if (bp == NULL) {
929		unmap(d, pp, MALLOC_PAGESIZE);
930		return NULL;
931	}
932
933	/* memory protect the page allocated in the malloc(0) case */
934	if (bits == 0) {
935		bp->size = 0;
936		bp->shift = 1;
937		i = MALLOC_MINSIZE - 1;
938		while (i >>= 1)
939			bp->shift++;
940		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
941		bp->page = pp;
942
943		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
944		if (k < 0) {
945			unmap(d, pp, MALLOC_PAGESIZE);
946			LIST_INSERT_HEAD(&d->chunk_info_list, bp, entries);
947			return NULL;
948		}
949	} else {
950		bp->size = 1U << bits;
951		bp->shift = bits;
952		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
953		bp->page = pp;
954	}
955
956	/* set all valid bits in the bitmap */
957	k = bp->total;
958	i = 0;
959
960	/* Do a bunch at a time */
961	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
962		bp->bits[i / MALLOC_BITS] = (u_short)~0U;
963
964	for (; i < k; i++)
965		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);
966
967	LIST_INSERT_HEAD(&d->chunk_dir[bits], bp, entries);
968
969	bits++;
970	if ((uintptr_t)pp & bits)
971		wrterror("pp & bits", pp);
972
973	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp);
974	return bp;
975}
976
977
978/*
979 * Allocate a chunk
980 */
981static void *
982malloc_bytes(struct dir_info *d, size_t size)
983{
984	int		i, j;
985	size_t		k;
986	u_short		u, *lp;
987	struct chunk_info *bp;
988
989	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
990	    d->canary1 != ~d->canary2)
991		wrterror("internal struct corrupt", NULL);
992	/* Don't bother with anything less than this */
993	/* unless we have a malloc(0) request */
994	if (size != 0 && size < MALLOC_MINSIZE)
995		size = MALLOC_MINSIZE;
996
997	/* Find the right bucket */
998	if (size == 0)
999		j = 0;
1000	else {
1001		j = MALLOC_MINSHIFT;
1002		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
1003		while (i >>= 1)
1004			j++;
1005	}
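	/*
	 * Example: for size == 100, i starts as 99 >> 3 == 12 and is shifted
	 * right three times, leaving j == 7, so the request is served from
	 * the 128-byte bucket; size == 16 leaves j at MALLOC_MINSHIFT and is
	 * served from the 16-byte bucket.
	 */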
1006
1007	/* If it's empty, make a page's worth of chunks of that size */
1008	if (LIST_EMPTY(&d->chunk_dir[j])) {
1009		bp = omalloc_make_chunks(d, j);
1010		if (bp == NULL)
1011			return NULL;
1012	} else
1013		bp = LIST_FIRST(&d->chunk_dir[j]);
1014
1015	if (bp->canary != d->canary1)
1016		wrterror("chunk info corrupted", NULL);
1017
1018	i = d->chunk_start;
1019	if (bp->free > 1)
1020		i += getrnibble();
1021	if (i >= bp->total)
1022		i &= bp->total - 1;
1023	for (;;) {
1024		for (;;) {
1025			lp = &bp->bits[i / MALLOC_BITS];
1026			if (!*lp) {
1027				i += MALLOC_BITS;
1028				i &= ~(MALLOC_BITS - 1);
1029				if (i >= bp->total)
1030					i = 0;
1031			} else
1032				break;
1033		}
1034		k = i % MALLOC_BITS;
1035		u = 1 << k;
1036		if (*lp & u)
1037			break;
1038		if (++i >= bp->total)
1039			i = 0;
1040	}
1041	d->chunk_start += i + 1;
1042
1043	*lp ^= u;
1044
1045	/* If there are no more free chunks, remove page from free-list */
1046	if (!--bp->free)
1047		LIST_REMOVE(bp, entries);
1048
1049	/* Adjust to the real offset of that chunk */
1050	k += (lp - bp->bits) * MALLOC_BITS;
1051	k <<= bp->shift;
1052
1053	if (mopts.malloc_junk && bp->size > 0)
1054		memset((char *)bp->page + k, SOME_JUNK, bp->size);
1055	return ((char *)bp->page + k);
1056}
1057
1058
1059/*
1060 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1061 */
1062static void
1063free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
1064{
1065	struct chunk_head *mp;
1066	struct chunk_info *info;
1067	long i;
1068
1069	info = (struct chunk_info *)r->size;
1070	if (info->canary != d->canary1)
1071		wrterror("chunk info corrupted", NULL);
1072
1073	/* Find the chunk number on the page */
1074	i = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;
1075
1076	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1)) {
1077		wrterror("modified chunk-pointer", ptr);
1078		return;
1079	}
1080	if (info->bits[i / MALLOC_BITS] & (1U << (i % MALLOC_BITS))) {
1081		wrterror("chunk is already free", ptr);
1082		return;
1083	}
1084
1085	info->bits[i / MALLOC_BITS] |= 1U << (i % MALLOC_BITS);
1086	info->free++;
1087
1088	if (info->size != 0)
1089		mp = d->chunk_dir + info->shift;
1090	else
1091		mp = d->chunk_dir;
1092
1093	if (info->free == 1) {
1094		/* Page became non-full */
1095		LIST_INSERT_HEAD(mp, info, entries);
1096		return;
1097	}
1098	if (info->free != info->total)
1099		return;
1100
1101	LIST_REMOVE(info, entries);
1102
1103	if (info->size == 0 && !mopts.malloc_freeprot)
1104		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
1105	unmap(d, info->page, MALLOC_PAGESIZE);
1106
1107	delete(d, r);
1108	LIST_INSERT_HEAD(&d->chunk_info_list, info, entries);
1109}
1110
1111
1112
1113static void *
1114omalloc(size_t sz, int zero_fill)
1115{
1116	void *p;
1117	size_t psz;
1118
1119	if (sz > MALLOC_MAXCHUNK) {
1120		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1121			errno = ENOMEM;
1122			return NULL;
1123		}
1124		sz += mopts.malloc_guard;
1125		psz = PAGEROUND(sz);
1126		p = map(g_pool, psz, zero_fill);
1127		if (p == MAP_FAILED) {
1128			errno = ENOMEM;
1129			return NULL;
1130		}
1131		if (insert(g_pool, p, sz)) {
1132			unmap(g_pool, p, psz);
1133			errno = ENOMEM;
1134			return NULL;
1135		}
1136		if (mopts.malloc_guard) {
1137			if (mprotect((char *)p + psz - mopts.malloc_guard,
1138			    mopts.malloc_guard, PROT_NONE))
1139				wrterror("mprotect", NULL);
1140			malloc_guarded += mopts.malloc_guard;
1141		}
1142
1143		if (mopts.malloc_move &&
1144		    sz - mopts.malloc_guard < MALLOC_PAGESIZE -
1145		    MALLOC_LEEWAY) {
1146			/* fill whole allocation */
1147			if (mopts.malloc_junk)
1148				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
1149			/* shift towards the end */
1150			p = ((char *)p) + ((MALLOC_PAGESIZE - MALLOC_LEEWAY -
1151			    (sz - mopts.malloc_guard)) & ~(MALLOC_MINSIZE-1));
1152			/* re-zero the user part if the junk fill above overwrote it */
1153			if (zero_fill && mopts.malloc_junk)
1154				memset(p, 0, sz - mopts.malloc_guard);
1155		} else {
1156			if (mopts.malloc_junk) {
1157				if (zero_fill)
1158					memset((char *)p + sz - mopts.malloc_guard,
1159					    SOME_JUNK, psz - sz);
1160				else
1161					memset(p, SOME_JUNK,
1162					    psz - mopts.malloc_guard);
1163			}
1164		}
1165
1166	} else {
1167		/* takes care of SOME_JUNK */
1168		p = malloc_bytes(g_pool, sz);
1169		if (zero_fill && p != NULL && sz > 0)
1170			memset(p, 0, sz);
1171	}
1172
1173	return p;
1174}
1175
1176/*
1177 * Common function for handling recursion.  Only
1178 * print the error message once, to avoid making the problem
1179 * potentially worse.
1180 */
1181static void
1182malloc_recurse(void)
1183{
1184	static int noprint;
1185
1186	if (noprint == 0) {
1187		noprint = 1;
1188		wrterror("recursive call", NULL);
1189	}
1190	malloc_active--;
1191	_MALLOC_UNLOCK();
1192	errno = EDEADLK;
1193}
1194
1195static int
1196malloc_init(void)
1197{
1198	if (omalloc_init(&g_pool)) {
1199		_MALLOC_UNLOCK();
1200		if (mopts.malloc_xmalloc)
1201			wrterror("out of memory", NULL);
1202		errno = ENOMEM;
1203		return -1;
1204	}
1205	return 0;
1206}
1207
1208void *
1209malloc(size_t size)
1210{
1211	void *r;
1212	int saved_errno = errno;
1213
1214	_MALLOC_LOCK();
1215	malloc_func = " in malloc():";
1216	if (g_pool == NULL) {
1217		if (malloc_init() != 0)
1218			return NULL;
1219	}
1220	if (malloc_active++) {
1221		malloc_recurse();
1222		return NULL;
1223	}
1224	r = omalloc(size, mopts.malloc_zero);
1225	malloc_active--;
1226	_MALLOC_UNLOCK();
1227	if (r == NULL && mopts.malloc_xmalloc) {
1228		wrterror("out of memory", NULL);
1229		errno = ENOMEM;
1230	}
1231	if (r != NULL)
1232		errno = saved_errno;
1233	return r;
1234}
1235
1236static void
1237ofree(void *p)
1238{
1239	struct region_info *r;
1240	size_t sz;
1241
1242	r = find(g_pool, p);
1243	if (r == NULL) {
1244		wrterror("bogus pointer (double free?)", p);
1245		return;
1246	}
1247	REALSIZE(sz, r);
1248	if (sz > MALLOC_MAXCHUNK) {
1249		if (sz - mopts.malloc_guard >= MALLOC_PAGESIZE -
1250		    MALLOC_LEEWAY) {
1251			if (r->p != p) {
1252				wrterror("bogus pointer", p);
1253				return;
1254			}
1255		} else {
1256#if notyetbecause_of_realloc
1257			/* shifted towards the end */
1258			if (p != ((char *)r->p) + ((MALLOC_PAGESIZE -
1259			    MALLOC_MINSIZE - sz - mopts.malloc_guard) &
1260			    ~(MALLOC_MINSIZE-1))) {
1261			}
1262#endif
1263			p = r->p;
1264		}
1265		if (mopts.malloc_guard) {
1266			if (sz < mopts.malloc_guard)
1267				wrterror("guard size", NULL);
1268			if (!mopts.malloc_freeprot) {
1269				if (mprotect((char *)p + PAGEROUND(sz) -
1270				    mopts.malloc_guard, mopts.malloc_guard,
1271				    PROT_READ | PROT_WRITE))
1272					wrterror("mprotect", NULL);
1273			}
1274			malloc_guarded -= mopts.malloc_guard;
1275		}
1276		if (mopts.malloc_junk && !mopts.malloc_freeprot)
1277			memset(p, SOME_FREEJUNK,
1278			    PAGEROUND(sz) - mopts.malloc_guard);
1279		unmap(g_pool, p, PAGEROUND(sz));
1280		delete(g_pool, r);
1281	} else {
1282		void *tmp;
1283		int i;
1284
1285		if (mopts.malloc_junk && sz > 0)
1286			memset(p, SOME_FREEJUNK, sz);
1287		if (!mopts.malloc_freeprot) {
1288			i = getrnibble();
1289			tmp = p;
1290			p = g_pool->delayed_chunks[i];
1291			g_pool->delayed_chunks[i] = tmp;
1292		}
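		/*
		 * Note on the swap above: unless the F option is set, a small
		 * chunk is not released right away; it is stashed in a random
		 * slot of delayed_chunks[] and whatever previously occupied
		 * that slot (possibly NULL) becomes the pointer that is
		 * actually freed below, so the real free happens at an
		 * unpredictable later call.
		 */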
1293		if (p != NULL) {
1294			r = find(g_pool, p);
1295			if (r == NULL) {
1296				wrterror("bogus pointer (double free?)", p);
1297				return;
1298			}
1299			free_bytes(g_pool, r, p);
1300		}
1301	}
1302}
1303
1304void
1305free(void *ptr)
1306{
1307	int saved_errno = errno;
1308
1309	/* This is legal. */
1310	if (ptr == NULL)
1311		return;
1312
1313	_MALLOC_LOCK();
1314	malloc_func = " in free():";
1315	if (g_pool == NULL) {
1316		_MALLOC_UNLOCK();
1317		wrterror("free() called before allocation", NULL);
1318		return;
1319	}
1320	if (malloc_active++) {
1321		malloc_recurse();
1322		return;
1323	}
1324	ofree(ptr);
1325	malloc_active--;
1326	_MALLOC_UNLOCK();
1327	errno = saved_errno;
1328}
1329
1330
1331static void *
1332orealloc(void *p, size_t newsz)
1333{
1334	struct region_info *r;
1335	size_t oldsz, goldsz, gnewsz;
1336	void *q;
1337
1338	if (p == NULL)
1339		return omalloc(newsz, 0);
1340
1341	r = find(g_pool, p);
1342	if (r == NULL) {
1343		wrterror("bogus pointer (double free?)", p);
1344		return NULL;
1345	}
1346	if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1347		errno = ENOMEM;
1348		return NULL;
1349	}
1350
1351	REALSIZE(oldsz, r);
1352	goldsz = oldsz;
1353	if (oldsz > MALLOC_MAXCHUNK) {
1354		if (oldsz < mopts.malloc_guard)
1355			wrterror("guard size", NULL);
1356		oldsz -= mopts.malloc_guard;
1357	}
1358
1359	gnewsz = newsz;
1360	if (gnewsz > MALLOC_MAXCHUNK)
1361		gnewsz += mopts.malloc_guard;
1362
1363	if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && p == r->p &&
1364	    !mopts.malloc_realloc) {
1365		size_t roldsz = PAGEROUND(goldsz);
1366		size_t rnewsz = PAGEROUND(gnewsz);
1367
1368		if (rnewsz > roldsz) {
1369			if (!mopts.malloc_guard) {
1370				STATS_INC(g_pool->cheap_realloc_tries);
1371				zapcacheregion(g_pool, (char *)p + roldsz);
1372				q = MMAPA((char *)p + roldsz, rnewsz - roldsz);
1373				if (q == (char *)p + roldsz) {
1374					malloc_used += rnewsz - roldsz;
1375					if (mopts.malloc_junk)
1376						memset(q, SOME_JUNK,
1377						    rnewsz - roldsz);
1378					r->size = newsz;
1379					STATS_INC(g_pool->cheap_reallocs);
1380					return p;
1381				} else if (q != MAP_FAILED)
1382					munmap(q, rnewsz - roldsz);
1383			}
1384		} else if (rnewsz < roldsz) {
1385			if (mopts.malloc_guard) {
1386				if (mprotect((char *)p + roldsz -
1387				    mopts.malloc_guard, mopts.malloc_guard,
1388				    PROT_READ | PROT_WRITE))
1389					wrterror("mprotect", NULL);
1390				if (mprotect((char *)p + rnewsz -
1391				    mopts.malloc_guard, mopts.malloc_guard,
1392				    PROT_NONE))
1393					wrterror("mprotect", NULL);
1394			}
1395			unmap(g_pool, (char *)p + rnewsz, roldsz - rnewsz);
1396			r->size = gnewsz;
1397			return p;
1398		} else {
1399			if (newsz > oldsz && mopts.malloc_junk)
1400				memset((char *)p + newsz, SOME_JUNK,
1401				    rnewsz - mopts.malloc_guard - newsz);
1402			r->size = gnewsz;
1403			return p;
1404		}
1405	}
1406	if (newsz <= oldsz && newsz > oldsz / 2 && !mopts.malloc_realloc) {
1407		if (mopts.malloc_junk && newsz > 0)
1408			memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1409		return p;
1410	} else if (newsz != oldsz || mopts.malloc_realloc) {
1411		q = omalloc(newsz, 0);
1412		if (q == NULL)
1413			return NULL;
1414		if (newsz != 0 && oldsz != 0)
1415			memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1416		ofree(p);
1417		return q;
1418	} else
1419		return p;
1420}
1421
1422void *
1423realloc(void *ptr, size_t size)
1424{
1425	void *r;
1426	int saved_errno = errno;
1427
1428	_MALLOC_LOCK();
1429	malloc_func = " in realloc():";
1430	if (g_pool == NULL) {
1431		if (malloc_init() != 0)
1432			return NULL;
1433	}
1434	if (malloc_active++) {
1435		malloc_recurse();
1436		return NULL;
1437	}
1438	r = orealloc(ptr, size);
1439
1440	malloc_active--;
1441	_MALLOC_UNLOCK();
1442	if (r == NULL && mopts.malloc_xmalloc) {
1443		wrterror("out of memory", NULL);
1444		errno = ENOMEM;
1445	}
1446	if (r != NULL)
1447		errno = saved_errno;
1448	return r;
1449}
1450
1451
1452#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
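/*
 * MUL_NO_OVERFLOW is 2 to the power of half the bits in a size_t (1 << 32
 * on a 64-bit machine).  If both nmemb and size are below it, their product
 * cannot overflow a size_t, so calloc() below only pays for the division
 * SIZE_MAX / nmemb when at least one factor is large.
 */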
1453
1454void *
1455calloc(size_t nmemb, size_t size)
1456{
1457	void *r;
1458	int saved_errno = errno;
1459
1460	_MALLOC_LOCK();
1461	malloc_func = " in calloc():";
1462	if (g_pool == NULL) {
1463		if (malloc_init() != 0)
1464			return NULL;
1465	}
1466	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1467	    nmemb > 0 && SIZE_MAX / nmemb < size) {
1468		_MALLOC_UNLOCK();
1469		if (mopts.malloc_xmalloc)
1470			wrterror("out of memory", NULL);
1471		errno = ENOMEM;
1472		return NULL;
1473	}
1474
1475	if (malloc_active++) {
1476		malloc_recurse();
1477		return NULL;
1478	}
1479
1480	size *= nmemb;
1481	r = omalloc(size, 1);
1482
1483	malloc_active--;
1484	_MALLOC_UNLOCK();
1485	if (r == NULL && mopts.malloc_xmalloc) {
1486		wrterror("out of memory", NULL);
1487		errno = ENOMEM;
1488	}
1489	if (r != NULL)
1490		errno = saved_errno;
1491	return r;
1492}
1493
1494int
1495posix_memalign(void **memptr, size_t alignment, size_t size)
1496{
1497	void *result;
1498
1499	/* Make sure that alignment is a large enough power of 2. */
1500	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1501	    alignment > MALLOC_PAGESIZE)
1502		return EINVAL;
1503
1504	/*
1505	 * max(size, alignment) is enough to assure the requested alignment,
1506	 * since the allocator always allocates power-of-two blocks.
1507	 */
1508	if (size < alignment)
1509		size = alignment;
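	/*
	 * For example (with 4096-byte pages), posix_memalign(&p, 64, 10)
	 * bumps size to 64; malloc(64) is served from the 64-byte chunk
	 * bucket, and since chunk pages are page-aligned and each chunk sits
	 * at a multiple of its power-of-two size, the result is 64-byte
	 * aligned.
	 */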
1510	result = malloc(size);
1511
1512	if (result == NULL)
1513		return ENOMEM;
1514
1515	*memptr = result;
1516	return 0;
1517}
1518
1519