/* malloc.c, revision 34192 */
/*-
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
/*static char *sccsid = "from: @(#)malloc.c	5.11 (Berkeley) 2/23/91";*/
static char *rcsid = "$Id: malloc.c,v 1.2 1996/01/19 18:36:54 jdp Exp $";
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */
4834192Sjdp */ 4934192Sjdp 5034192Sjdp#include <sys/types.h> 5134192Sjdp#include <err.h> 5234192Sjdp#include <stdlib.h> 5334192Sjdp#include <string.h> 5434192Sjdp#include <unistd.h> 5534192Sjdp#include <sys/param.h> 5634192Sjdp#include <sys/mman.h> 5734192Sjdp#ifndef BSD 5834192Sjdp#define MAP_COPY MAP_PRIVATE 5934192Sjdp#define MAP_FILE 0 6034192Sjdp#define MAP_ANON 0 6134192Sjdp#endif 6234192Sjdp 6334192Sjdp#ifndef BSD /* Need do better than this */ 6434192Sjdp#define NEED_DEV_ZERO 1 6534192Sjdp#endif 6634192Sjdp 6734192Sjdp#define NULL 0 6834192Sjdp 6934192Sjdpstatic void morecore(); 7034192Sjdpstatic int findbucket(); 7134192Sjdp 7234192Sjdp/* 7334192Sjdp * Pre-allocate mmap'ed pages 7434192Sjdp */ 7534192Sjdp#define NPOOLPAGES (32*1024/pagesz) 7634192Sjdpstatic caddr_t pagepool_start, pagepool_end; 7734192Sjdpstatic int morepages(); 7834192Sjdp 7934192Sjdp/* 8034192Sjdp * The overhead on a block is at least 4 bytes. When free, this space 8134192Sjdp * contains a pointer to the next free block, and the bottom two bits must 8234192Sjdp * be zero. When in use, the first byte is set to MAGIC, and the second 8334192Sjdp * byte is the size index. The remaining bytes are for alignment. 8434192Sjdp * If range checking is enabled then a second word holds the size of the 8534192Sjdp * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC). 8634192Sjdp * The order of elements is critical: ov_magic must overlay the low order 8734192Sjdp * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern. 
8834192Sjdp */ 8934192Sjdpunion overhead { 9034192Sjdp union overhead *ov_next; /* when free */ 9134192Sjdp struct { 9234192Sjdp u_char ovu_magic; /* magic number */ 9334192Sjdp u_char ovu_index; /* bucket # */ 9434192Sjdp#ifdef RCHECK 9534192Sjdp u_short ovu_rmagic; /* range magic number */ 9634192Sjdp u_int ovu_size; /* actual block size */ 9734192Sjdp#endif 9834192Sjdp } ovu; 9934192Sjdp#define ov_magic ovu.ovu_magic 10034192Sjdp#define ov_index ovu.ovu_index 10134192Sjdp#define ov_rmagic ovu.ovu_rmagic 10234192Sjdp#define ov_size ovu.ovu_size 10334192Sjdp}; 10434192Sjdp 10534192Sjdp#define MAGIC 0xef /* magic # on accounting info */ 10634192Sjdp#define RMAGIC 0x5555 /* magic # on range info */ 10734192Sjdp 10834192Sjdp#ifdef RCHECK 10934192Sjdp#define RSLOP sizeof (u_short) 11034192Sjdp#else 11134192Sjdp#define RSLOP 0 11234192Sjdp#endif 11334192Sjdp 11434192Sjdp/* 11534192Sjdp * nextf[i] is the pointer to the next free block of size 2^(i+3). The 11634192Sjdp * smallest allocatable block is 8 bytes. The overhead information 11734192Sjdp * precedes the data area returned to the user. 11834192Sjdp */ 11934192Sjdp#define NBUCKETS 30 12034192Sjdpstatic union overhead *nextf[NBUCKETS]; 12134192Sjdpextern char *sbrk(); 12234192Sjdp 12334192Sjdpstatic int pagesz; /* page size */ 12434192Sjdpstatic int pagebucket; /* page size bucket */ 12534192Sjdp 12634192Sjdp#ifdef MSTATS 12734192Sjdp/* 12834192Sjdp * nmalloc[i] is the difference between the number of mallocs and frees 12934192Sjdp * for a given block size. 
13034192Sjdp */ 13134192Sjdpstatic u_int nmalloc[NBUCKETS]; 13234192Sjdp#include <stdio.h> 13334192Sjdp#endif 13434192Sjdp 13534192Sjdp#if defined(MALLOC_DEBUG) || defined(RCHECK) 13634192Sjdp#define ASSERT(p) if (!(p)) botch("p") 13734192Sjdp#include <stdio.h> 13834192Sjdpstatic void 13934192Sjdpbotch(s) 14034192Sjdp char *s; 14134192Sjdp{ 14234192Sjdp fprintf(stderr, "\r\nassertion botched: %s\r\n", s); 14334192Sjdp (void) fflush(stderr); /* just in case user buffered it */ 14434192Sjdp abort(); 14534192Sjdp} 14634192Sjdp#else 14734192Sjdp#define ASSERT(p) 14834192Sjdp#endif 14934192Sjdp 15034192Sjdp/* Debugging stuff */ 15134192Sjdpextern void xprintf(const char *, ...); 15234192Sjdp#define TRACE() xprintf("TRACE %s:%d\n", __FILE__, __LINE__) 15334192Sjdp 15434192Sjdpvoid * 15534192Sjdpmalloc(nbytes) 15634192Sjdp size_t nbytes; 15734192Sjdp{ 15834192Sjdp register union overhead *op; 15934192Sjdp register int bucket, n; 16034192Sjdp register unsigned amt; 16134192Sjdp 16234192Sjdp /* 16334192Sjdp * First time malloc is called, setup page size and 16434192Sjdp * align break pointer so all data will be page aligned. 16534192Sjdp */ 16634192Sjdp if (pagesz == 0) { 16734192Sjdp pagesz = n = getpagesize(); 16834192Sjdp if (morepages(NPOOLPAGES) == 0) 16934192Sjdp return NULL; 17034192Sjdp op = (union overhead *)(pagepool_start); 17134192Sjdp n = n - sizeof (*op) - ((int)op & (n - 1)); 17234192Sjdp if (n < 0) 17334192Sjdp n += pagesz; 17434192Sjdp if (n) { 17534192Sjdp pagepool_start += n; 17634192Sjdp } 17734192Sjdp bucket = 0; 17834192Sjdp amt = 8; 17934192Sjdp while (pagesz > amt) { 18034192Sjdp amt <<= 1; 18134192Sjdp bucket++; 18234192Sjdp } 18334192Sjdp pagebucket = bucket; 18434192Sjdp } 18534192Sjdp /* 18634192Sjdp * Convert amount of memory requested into closest block size 18734192Sjdp * stored in hash buckets which satisfies request. 18834192Sjdp * Account for space used per block for accounting. 
18934192Sjdp */ 19034192Sjdp if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) { 19134192Sjdp#ifndef RCHECK 19234192Sjdp amt = 8; /* size of first bucket */ 19334192Sjdp bucket = 0; 19434192Sjdp#else 19534192Sjdp amt = 16; /* size of first bucket */ 19634192Sjdp bucket = 1; 19734192Sjdp#endif 19834192Sjdp n = -(sizeof (*op) + RSLOP); 19934192Sjdp } else { 20034192Sjdp amt = pagesz; 20134192Sjdp bucket = pagebucket; 20234192Sjdp } 20334192Sjdp while (nbytes > amt + n) { 20434192Sjdp amt <<= 1; 20534192Sjdp if (amt == 0) 20634192Sjdp return (NULL); 20734192Sjdp bucket++; 20834192Sjdp } 20934192Sjdp /* 21034192Sjdp * If nothing in hash bucket right now, 21134192Sjdp * request more memory from the system. 21234192Sjdp */ 21334192Sjdp if ((op = nextf[bucket]) == NULL) { 21434192Sjdp morecore(bucket); 21534192Sjdp if ((op = nextf[bucket]) == NULL) 21634192Sjdp return (NULL); 21734192Sjdp } 21834192Sjdp /* remove from linked list */ 21934192Sjdp nextf[bucket] = op->ov_next; 22034192Sjdp op->ov_magic = MAGIC; 22134192Sjdp op->ov_index = bucket; 22234192Sjdp#ifdef MSTATS 22334192Sjdp nmalloc[bucket]++; 22434192Sjdp#endif 22534192Sjdp#ifdef RCHECK 22634192Sjdp /* 22734192Sjdp * Record allocated size of block and 22834192Sjdp * bound space with magic numbers. 22934192Sjdp */ 23034192Sjdp op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); 23134192Sjdp op->ov_rmagic = RMAGIC; 23234192Sjdp *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; 23334192Sjdp#endif 23434192Sjdp return ((char *)(op + 1)); 23534192Sjdp} 23634192Sjdp 23734192Sjdp/* 23834192Sjdp * Allocate more memory to the indicated bucket. 
23934192Sjdp */ 24034192Sjdpstatic void 24134192Sjdpmorecore(bucket) 24234192Sjdp int bucket; 24334192Sjdp{ 24434192Sjdp register union overhead *op; 24534192Sjdp register int sz; /* size of desired block */ 24634192Sjdp int amt; /* amount to allocate */ 24734192Sjdp int nblks; /* how many blocks we get */ 24834192Sjdp 24934192Sjdp /* 25034192Sjdp * sbrk_size <= 0 only for big, FLUFFY, requests (about 25134192Sjdp * 2^30 bytes on a VAX, I think) or for a negative arg. 25234192Sjdp */ 25334192Sjdp sz = 1 << (bucket + 3); 25434192Sjdp#ifdef MALLOC_DEBUG 25534192Sjdp ASSERT(sz > 0); 25634192Sjdp#else 25734192Sjdp if (sz <= 0) 25834192Sjdp return; 25934192Sjdp#endif 26034192Sjdp if (sz < pagesz) { 26134192Sjdp amt = pagesz; 26234192Sjdp nblks = amt / sz; 26334192Sjdp } else { 26434192Sjdp amt = sz + pagesz; 26534192Sjdp nblks = 1; 26634192Sjdp } 26734192Sjdp if (amt > pagepool_end - pagepool_start) 26834192Sjdp if (morepages(amt/pagesz + NPOOLPAGES) == 0) 26934192Sjdp return; 27034192Sjdp op = (union overhead *)pagepool_start; 27134192Sjdp pagepool_start += amt; 27234192Sjdp 27334192Sjdp /* 27434192Sjdp * Add new memory allocated to that on 27534192Sjdp * free list for this hash bucket. 
27634192Sjdp */ 27734192Sjdp nextf[bucket] = op; 27834192Sjdp while (--nblks > 0) { 27934192Sjdp op->ov_next = (union overhead *)((caddr_t)op + sz); 28034192Sjdp op = (union overhead *)((caddr_t)op + sz); 28134192Sjdp } 28234192Sjdp} 28334192Sjdp 28434192Sjdpvoid 28534192Sjdpfree(cp) 28634192Sjdp void *cp; 28734192Sjdp{ 28834192Sjdp register int size; 28934192Sjdp register union overhead *op; 29034192Sjdp 29134192Sjdp if (cp == NULL) 29234192Sjdp return; 29334192Sjdp op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); 29434192Sjdp#ifdef MALLOC_DEBUG 29534192Sjdp ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ 29634192Sjdp#else 29734192Sjdp if (op->ov_magic != MAGIC) 29834192Sjdp return; /* sanity */ 29934192Sjdp#endif 30034192Sjdp#ifdef RCHECK 30134192Sjdp ASSERT(op->ov_rmagic == RMAGIC); 30234192Sjdp ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); 30334192Sjdp#endif 30434192Sjdp size = op->ov_index; 30534192Sjdp ASSERT(size < NBUCKETS); 30634192Sjdp op->ov_next = nextf[size]; /* also clobbers ov_magic */ 30734192Sjdp nextf[size] = op; 30834192Sjdp#ifdef MSTATS 30934192Sjdp nmalloc[size]--; 31034192Sjdp#endif 31134192Sjdp} 31234192Sjdp 31334192Sjdp/* 31434192Sjdp * When a program attempts "storage compaction" as mentioned in the 31534192Sjdp * old malloc man page, it realloc's an already freed block. Usually 31634192Sjdp * this is the last block it freed; occasionally it might be farther 31734192Sjdp * back. We have to search all the free lists for the block in order 31834192Sjdp * to determine its bucket: 1st we make one pass thru the lists 31934192Sjdp * checking only the first block in each; if that fails we search 32034192Sjdp * ``realloc_srchlen'' blocks in each list for a match (the variable 32134192Sjdp * is extern so the caller can modify it). If that fails we just copy 32234192Sjdp * however many bytes was given to realloc() and hope it's not huge. 
32334192Sjdp */ 32434192Sjdpint realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ 32534192Sjdp 32634192Sjdpvoid * 32734192Sjdprealloc(cp, nbytes) 32834192Sjdp void *cp; 32934192Sjdp size_t nbytes; 33034192Sjdp{ 33134192Sjdp register u_int onb; 33234192Sjdp register int i; 33334192Sjdp union overhead *op; 33434192Sjdp char *res; 33534192Sjdp int was_alloced = 0; 33634192Sjdp 33734192Sjdp if (cp == NULL) 33834192Sjdp return (malloc(nbytes)); 33934192Sjdp op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); 34034192Sjdp if (op->ov_magic == MAGIC) { 34134192Sjdp was_alloced++; 34234192Sjdp i = op->ov_index; 34334192Sjdp } else { 34434192Sjdp /* 34534192Sjdp * Already free, doing "compaction". 34634192Sjdp * 34734192Sjdp * Search for the old block of memory on the 34834192Sjdp * free list. First, check the most common 34934192Sjdp * case (last element free'd), then (this failing) 35034192Sjdp * the last ``realloc_srchlen'' items free'd. 35134192Sjdp * If all lookups fail, then assume the size of 35234192Sjdp * the memory block being realloc'd is the 35334192Sjdp * largest possible (so that all "nbytes" of new 35434192Sjdp * memory are copied into). Note that this could cause 35534192Sjdp * a memory fault if the old area was tiny, and the moon 35634192Sjdp * is gibbous. However, that is very unlikely. 
35734192Sjdp */ 35834192Sjdp if ((i = findbucket(op, 1)) < 0 && 35934192Sjdp (i = findbucket(op, realloc_srchlen)) < 0) 36034192Sjdp i = NBUCKETS; 36134192Sjdp } 36234192Sjdp onb = 1 << (i + 3); 36334192Sjdp if (onb < pagesz) 36434192Sjdp onb -= sizeof (*op) + RSLOP; 36534192Sjdp else 36634192Sjdp onb += pagesz - sizeof (*op) - RSLOP; 36734192Sjdp /* avoid the copy if same size block */ 36834192Sjdp if (was_alloced) { 36934192Sjdp if (i) { 37034192Sjdp i = 1 << (i + 2); 37134192Sjdp if (i < pagesz) 37234192Sjdp i -= sizeof (*op) + RSLOP; 37334192Sjdp else 37434192Sjdp i += pagesz - sizeof (*op) - RSLOP; 37534192Sjdp } 37634192Sjdp if (nbytes <= onb && nbytes > i) { 37734192Sjdp#ifdef RCHECK 37834192Sjdp op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); 37934192Sjdp *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; 38034192Sjdp#endif 38134192Sjdp return(cp); 38234192Sjdp } else 38334192Sjdp free(cp); 38434192Sjdp } 38534192Sjdp if ((res = malloc(nbytes)) == NULL) 38634192Sjdp return (NULL); 38734192Sjdp if (cp != res) /* common optimization if "compacting" */ 38834192Sjdp bcopy(cp, res, (nbytes < onb) ? nbytes : onb); 38934192Sjdp return (res); 39034192Sjdp} 39134192Sjdp 39234192Sjdp/* 39334192Sjdp * Search ``srchlen'' elements of each free list for a block whose 39434192Sjdp * header starts at ``freep''. If srchlen is -1 search the whole list. 39534192Sjdp * Return bucket number, or -1 if not found. 
39634192Sjdp */ 39734192Sjdpstatic int 39834192Sjdpfindbucket(freep, srchlen) 39934192Sjdp union overhead *freep; 40034192Sjdp int srchlen; 40134192Sjdp{ 40234192Sjdp register union overhead *p; 40334192Sjdp register int i, j; 40434192Sjdp 40534192Sjdp for (i = 0; i < NBUCKETS; i++) { 40634192Sjdp j = 0; 40734192Sjdp for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { 40834192Sjdp if (p == freep) 40934192Sjdp return (i); 41034192Sjdp j++; 41134192Sjdp } 41234192Sjdp } 41334192Sjdp return (-1); 41434192Sjdp} 41534192Sjdp 41634192Sjdp#ifdef MSTATS 41734192Sjdp/* 41834192Sjdp * mstats - print out statistics about malloc 41934192Sjdp * 42034192Sjdp * Prints two lines of numbers, one showing the length of the free list 42134192Sjdp * for each size category, the second showing the number of mallocs - 42234192Sjdp * frees for each size category. 42334192Sjdp */ 42434192Sjdpmstats(s) 42534192Sjdp char *s; 42634192Sjdp{ 42734192Sjdp register int i, j; 42834192Sjdp register union overhead *p; 42934192Sjdp int totfree = 0, 43034192Sjdp totused = 0; 43134192Sjdp 43234192Sjdp fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); 43334192Sjdp for (i = 0; i < NBUCKETS; i++) { 43434192Sjdp for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) 43534192Sjdp ; 43634192Sjdp fprintf(stderr, " %d", j); 43734192Sjdp totfree += j * (1 << (i + 3)); 43834192Sjdp } 43934192Sjdp fprintf(stderr, "\nused:\t"); 44034192Sjdp for (i = 0; i < NBUCKETS; i++) { 44134192Sjdp fprintf(stderr, " %d", nmalloc[i]); 44234192Sjdp totused += nmalloc[i] * (1 << (i + 3)); 44334192Sjdp } 44434192Sjdp fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", 44534192Sjdp totused, totfree); 44634192Sjdp} 44734192Sjdp#endif 44834192Sjdp 44934192Sjdp 45034192Sjdpstatic int 45134192Sjdpmorepages(n) 45234192Sjdpint n; 45334192Sjdp{ 45434192Sjdp int fd = -1; 45534192Sjdp int offset; 45634192Sjdp 45734192Sjdp#ifdef NEED_DEV_ZERO 45834192Sjdp fd = open("/dev/zero", O_RDWR, 0); 45934192Sjdp if (fd == -1) 
46034192Sjdp perror("/dev/zero"); 46134192Sjdp#endif 46234192Sjdp 46334192Sjdp if (pagepool_end - pagepool_start > pagesz) { 46434192Sjdp caddr_t addr = (caddr_t) 46534192Sjdp (((int)pagepool_start + pagesz - 1) & ~(pagesz - 1)); 46634192Sjdp if (munmap(addr, pagepool_end - addr) != 0) 46734192Sjdp warn("morepages: munmap %p", addr); 46834192Sjdp } 46934192Sjdp 47034192Sjdp offset = (int)pagepool_start - ((int)pagepool_start & ~(pagesz - 1)); 47134192Sjdp 47234192Sjdp if ((pagepool_start = mmap(0, n * pagesz, 47334192Sjdp PROT_READ|PROT_WRITE, 47434192Sjdp MAP_ANON|MAP_COPY, fd, 0)) == (caddr_t)-1) { 47534192Sjdp xprintf("Cannot map anonymous memory"); 47634192Sjdp return 0; 47734192Sjdp } 47834192Sjdp pagepool_end = pagepool_start + n * pagesz; 47934192Sjdp pagepool_start += offset; 48034192Sjdp 48134192Sjdp#ifdef NEED_DEV_ZERO 48234192Sjdp close(fd); 48334192Sjdp#endif 48434192Sjdp return n; 48534192Sjdp} 486