/* $Header: /p/tcsh/cvsroot/tcsh/tc.alloc.c,v 3.46 2006/03/02 18:46:44 christos Exp $ */
/*
 * tc.alloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
 * This is designed for use in a program that uses vast quantities of memory,
 * but bombs when it runs out.
 */
/*-
 * Copyright (c) 1980, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "sh.h"

RCSID("$tcsh: tc.alloc.c,v 3.46 2006/03/02 18:46:44 christos Exp $")

#define RCHECK
#define DEBUG

static char   *memtop = NULL;		/* PWP: top of current memory */
static char   *membot = NULL;		/* PWP: bottom of allocatable memory */

int dont_free = 0;

#ifdef WINNT_NATIVE
# define malloc		fmalloc
# define free		ffree
# define calloc		fcalloc
# define realloc	frealloc
#endif /* WINNT_NATIVE */

#if !defined(DEBUG) || defined(SYSMALLOC)
static void
out_of_memory (void)
{
    static const char msg[] = "Out of memory\n";

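    /* (editor's note, an assumption about intent: didfds is nonzero once the
     * shell has set up fds 0/1/2 for a user command, so complain on fd 2;
     * otherwise use the shell's diagnostic descriptor SHDIAG) */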
    write(didfds ? 2 : SHDIAG, msg, strlen(msg));
    _exit(1);
}
#endif

#ifndef SYSMALLOC

#ifdef SX
extern void* sbrk();
#endif
/*
 * Lots of OS routines are busted and try to free invalid pointers.
 * Although our free routine is smart enough to catch bad pointers most
 * of the time, in cases where we know we are going to get a bad pointer,
 * we'd rather leak.
 */

#ifndef NULL
#define	NULL 0
#endif

typedef unsigned char U_char;	/* we don't really have signed chars */
typedef unsigned int U_int;
typedef unsigned short U_short;
typedef unsigned long U_long;


/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */
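/*
 * Sketch of an in-use block with RCHECK on a typical 32-bit layout
 * (an illustration, not normative):
 *   ov_magic (1) | ov_index (1) | ov_size (2) | ov_rmagic (4) | data... | RMAGIC (4)
 */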


#define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)
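/* e.g. with ROUNDUP == 7 (defined below), MEMALIGN rounds up to a multiple
 * of 8: MEMALIGN(8) == 8, MEMALIGN(9) == 16 */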

union overhead {
    union overhead *ov_next;	/* when free */
    struct {
	U_char  ovu_magic;	/* magic number */
	U_char  ovu_index;	/* bucket # */
#ifdef RCHECK
	U_short ovu_size;	/* actual block size */
	U_int   ovu_rmagic;	/* range magic number */
#endif
    }       ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_size		ovu.ovu_size
#define	ov_rmagic	ovu.ovu_rmagic
};

#define	MAGIC		0xfd	/* magic # on accounting info */
#define RMAGIC		0x55555555	/* magic # on range info */
#ifdef RCHECK
#define	RSLOP		sizeof (U_int)
#else
#define	RSLOP		0
#endif


#define ROUNDUP	7

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS ((sizeof(long) << 3) - 3)
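/* e.g. with a 32-bit long, NBUCKETS == 29: buckets cover block sizes
 * 2^3 (8 bytes) through 2^31 */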
static union overhead *nextf[NBUCKETS] IZERO_STRUCT;

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static U_int nmalloc[NBUCKETS] IZERO_STRUCT;

#ifndef lint
static	int	findbucket	(union overhead *, int);
static	void	morecore	(int);
#endif


#ifdef DEBUG
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot);	\
	abort(); \
    }
#else
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot);	\
	return; \
    }
#endif

memalign_t
malloc(size_t nbytes)
{
#ifndef lint
    union overhead *p;
    int bucket = 0;
    unsigned shiftr;

    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request.  Account for space used per block
     * for accounting.
     */
#ifdef SUNOS4
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and we always allocate an extra byte.
     */
    nbytes++;
#endif

    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    shiftr = (nbytes - 1) >> 2;

    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
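    /*
     * Example: for a padded request of nbytes == 64, shiftr starts at
     * 63 >> 2 == 15 and is shifted down to 7, 3, 1, giving bucket 3,
     * i.e. a 2^(3+3) == 64-byte block; nbytes == 65 lands in bucket 4
     * (128 bytes).  In general nbytes in (2^(b+2), 2^(b+3)] maps to
     * bucket b.
     */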
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    if ((p = nextf[bucket]) == NULL) {
	child++;
#ifndef DEBUG
	out_of_memory();
#else
	showall(NULL, NULL);
	xprintf(CGETS(19, 1, "nbytes=%zu: Out of memory\n"), nbytes);
	abort();
#endif
	/* fool lint */
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    nmalloc[bucket]++;
#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     */
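    /* ov_size is a U_short; bucket 13 blocks are 2^16 bytes, so nbytes - 1
     * still fits.  Larger buckets store 0, and free() skips the trailing
     * range check for them. */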
    p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
#endif
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

#ifndef lint
/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
    union overhead *op;
    int rnu;		/* 2^rnu bytes will be requested */
    int nblks;		/* become nblks blocks of the desired size */
    int siz;

    if (nextf[bucket])
	return;
    /*
     * Ensure memory is allocated on a page boundary.  Should we make a
     * getpagesize() call?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;
    if ((long) op & 0x3ff) {
	memtop = sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
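    /* e.g. bucket 3 (64-byte blocks): rnu == 11, so we sbrk 2048 bytes
     * and carve them into 1 << (11 - 6) == 32 blocks */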
    memtop = sbrk(1 << rnu);	/* PWP */
    op = (union overhead *) memtop;
    /* no more room! */
    if ((long) op == -1)
	return;
    memtop += (long) (1 << rnu);
    /*
     * Round up to minimum allocation size boundary and deduct from block count
     * to reflect.
     */
    if (((U_long) op) & ROUNDUP) {
	op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
	nblks--;
    }
    /*
     * Add new memory allocated to that on free list for this hash bucket.
     */
    nextf[bucket] = op;
    siz = 1 << (bucket + 3);
    while (--nblks > 0) {
	op->ov_next = (union overhead *) (((caddr_t) op) + siz);
	op = (union overhead *) (((caddr_t) op) + siz);
    }
    op->ov_next = NULL;
}

#endif

void
free(ptr_t cp)
{
#ifndef lint
    int size;
    union overhead *op;

    /*
     * the don't free flag is there so that we avoid os bugs in routines
     * that free invalid pointers!
     */
    if (cp == NULL || dont_free)
	return;
    CHECK(!memtop || !membot,
	  CGETS(19, 2, "free(%p) called before any allocations."), cp);
    CHECK(cp > (ptr_t) memtop,
	  CGETS(19, 3, "free(%p) above top of memory."), cp);
    CHECK(cp < (ptr_t) membot,
	  CGETS(19, 4, "free(%p) below bottom of memory."), cp);
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    CHECK(op->ov_magic != MAGIC,
	  CGETS(19, 5, "free(%p) bad block."), cp);

#ifdef RCHECK
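    /* ov_size was set to nbytes - 1 in malloc(), so op + ov_size + 1 - RSLOP
     * is exactly where malloc() stored the trailing RMAGIC word */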
    if (op->ov_index <= 13)
	CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
	      CGETS(19, 6, "free(%p) bad range check."), cp);
#endif
    CHECK(op->ov_index >= NBUCKETS,
	  CGETS(19, 7, "free(%p) bad block index."), cp);
    size = op->ov_index;
    op->ov_next = nextf[size];
    nextf[size] = op;

    nmalloc[size]--;

#else
    if (cp == NULL)
	return;
#endif
}

memalign_t
calloc(size_t i, size_t j)
{
#ifndef lint
    char *cp;

    i *= j;
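    /* note: i * j is not checked for size_t overflow */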
    cp = xmalloc(i);
    memset(cp, 0, i);

    return ((memalign_t) cp);
#else
    if (i && j)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: first we make one pass through the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is file-static, so adjust it below to change the search depth).  If
 * that fails we just copy however many bytes were given to realloc()
 * and hope it's not huge.
 */
#ifndef lint
/* 4 should be plenty, -1 =>'s whole list */
static int     realloc_srchlen = 4;
#endif /* lint */

memalign_t
realloc(ptr_t cp, size_t nbytes)
{
#ifndef lint
    U_int onb;
    union overhead *op;
    ptr_t res;
    int i;
    int     was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list.  First, check the
	 * most common case (last element free'd), then (this failing) the last
	 * ``realloc_srchlen'' items free'd. If all lookups fail, then assume
	 * the size of the memory block being realloc'd is the smallest
	 * possible.
	 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block */
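    /* i.e. the padded size still maps to the same power-of-two bucket i:
     * 2^(i+2) < onb <= 2^(i+3) */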
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
#ifdef RCHECK
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
#endif
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new size
	 */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove(res, cp, onb < nbytes ? onb : nbytes);
    }
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}



#ifndef lint
/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(union overhead *freep, int srchlen)
{
    union overhead *p;
    size_t i;
    int j;

    for (i = 0; i < NBUCKETS; i++) {
	j = 0;
	for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
	    if (p == freep)
		return (i);
	    j++;
	}
    }
    return (-1);
}

#endif


#else				/* SYSMALLOC */

/**
 ** ``Protected versions'' of malloc, realloc, calloc, and free
 **
 ** On many systems:
 **
 ** 1. malloc(0) is bad
 ** 2. free(0) is bad
 ** 3. realloc(0, n) is bad
 ** 4. realloc(n, 0) is bad
 **
 ** Also we call our error routine if we run out of memory.
 **/
memalign_t
smalloc(size_t n)
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifdef HAVE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* HAVE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();
#ifndef HAVE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !HAVE_SBRK */
    return ((memalign_t) ptr);
}

memalign_t
srealloc(ptr_t p, size_t n)
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifdef HAVE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* HAVE_SBRK */

    if ((ptr = (p ? realloc(p, n) : malloc(n))) == NULL)
	out_of_memory();
#ifndef HAVE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !HAVE_SBRK */
    return ((memalign_t) ptr);
}

memalign_t
scalloc(size_t s, size_t n)
{
    ptr_t   ptr;

    n *= s;
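    /* note: n * s is not checked for size_t overflow */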
    n = n ? n : 1;

#ifdef HAVE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* HAVE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();

    memset (ptr, 0, n);

#ifndef HAVE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !HAVE_SBRK */

    return ((memalign_t) ptr);
}

void
sfree(ptr_t p)
{
    if (p && !dont_free)
	free(p);
}

#endif /* SYSMALLOC */

/*
 * showall - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
/*ARGSUSED*/
void
showall(Char **v, struct command *c)
{
#ifndef SYSMALLOC
    size_t i, j;
    union overhead *p;
    int     totfree = 0, totused = 0;

    xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
    for (i = 0; i < NBUCKETS; i++) {
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    continue;
	xprintf(" %4zd", j);
	totfree += j * (1 << (i + 3));
    }
    xprintf(CGETS(19, 9, "\nused:\t"));
    for (i = 0; i < NBUCKETS; i++) {
	xprintf(" %4d", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
	    totused, totfree);
    xprintf(CGETS(19, 11,
	    "\tAllocated memory from 0x%lx to 0x%lx.  Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else
#ifdef HAVE_SBRK
    memtop = sbrk(0);
#endif /* HAVE_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}