tc.alloc.c revision 69408
/* $Header: /src/pub/tcsh/tc.alloc.c,v 3.35 2000/11/11 23:03:38 christos Exp $ */
/*
 * tc.alloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
 * This is designed for use in a program that uses vast quantities of memory,
 * but bombs when it runs out.
 */
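/*
 * For example, assuming the 4-byte per-block overhead described above:
 * a 100-byte request fits in the 128-byte (2^7) size class, which offers
 * 124 usable bytes, while a 125-byte request is passed up to the 256-byte
 * class.  The exact usable sizes depend on alignment and on whether range
 * checking (RCHECK) is compiled in.
 */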
/*-
 * Copyright (c) 1980, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "sh.h"

RCSID("$Id: tc.alloc.c,v 3.35 2000/11/11 23:03:38 christos Exp $")

static char   *memtop = NULL;		/* PWP: top of current memory */
static char   *membot = NULL;		/* PWP: bottom of allocatable memory */

int dont_free = 0;

#if defined(_VMS_POSIX) || defined(_AMIGA_MEMORY)
# define NO_SBRK
#endif

#ifdef WINNT_NATIVE
# define malloc		fmalloc
# define free		ffree
# define calloc		fcalloc
# define realloc	frealloc
#endif /* WINNT_NATIVE */

#ifndef SYSMALLOC

#undef RCHECK
#undef DEBUG

#ifdef SX
extern void* sbrk();
#endif
/*
 * Lots of OS routines are busted and try to free invalid pointers.
 * Although our free routine is smart enough to catch bad pointers
 * most of the time, in cases where we know we are going to get a bad
 * pointer, we'd rather leak the memory.
 */

#ifndef NULL
#define	NULL 0
#endif

typedef unsigned char U_char;	/* we don't really have signed chars */
typedef unsigned int U_int;
typedef unsigned short U_short;
typedef unsigned long U_long;


/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */


#define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)

union overhead {
    union overhead *ov_next;	/* when free */
    struct {
	U_char  ovu_magic;	/* magic number */
	U_char  ovu_index;	/* bucket # */
#ifdef RCHECK
	U_short ovu_size;	/* actual block size */
	U_int   ovu_rmagic;	/* range magic number */
#endif
    }       ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_size		ovu.ovu_size
#define	ov_rmagic	ovu.ovu_rmagic
};
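/*
 * Sketch of the two views of this header (widths assume no RCHECK and a
 * 4-byte pointer; actual sizes vary by platform):
 *
 *	free block:	[ ov_next ---------> next free block of this size ]
 *	in-use block:	[ MAGIC | bucket # | pad | pad ][ user data ... ]
 *
 * The pointer handed back by malloc() points just past this header.
 */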

#define	MAGIC		0xfd	/* magic # on accounting info */
#define RMAGIC		0x55555555	/* magic # on range info */
#ifdef RCHECK
#define	RSLOP		sizeof (U_int)
#else
#define	RSLOP		0
#endif


#define ROUNDUP	7
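/*
 * With ROUNDUP == 7, MEMALIGN(a) rounds a up to the next multiple of 8;
 * for instance MEMALIGN(1) == 8, MEMALIGN(18) == 24 and MEMALIGN(24) == 24.
 */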

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS ((sizeof(long) << 3) - 3)
static union overhead *nextf[NBUCKETS] IZERO_STRUCT;
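/*
 * For example, on a machine with 32-bit longs NBUCKETS is 32 - 3 = 29,
 * so the buckets cover block sizes from 2^3 (8 bytes) up to 2^31; with
 * 64-bit longs there are 61 buckets.
 */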

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static U_int nmalloc[NBUCKETS] IZERO_STRUCT;

#ifndef lint
static	int	findbucket	__P((union overhead *, int));
static	void	morecore	__P((int));
#endif


#ifdef DEBUG
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %lx membot = %lx)\n", memtop, membot);	\
	abort(); \
    }
#else
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %lx membot = %lx)\n", memtop, membot);	\
	return; \
    }
#endif
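/*
 * In other words: with DEBUG defined the consistency checks in free()
 * abort the shell so the failure can be examined, while the production
 * version just prints the complaint and returns, deliberately leaking
 * the suspect block.
 */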

memalign_t
malloc(nbytes)
    register size_t nbytes;
{
#ifndef lint
    register union overhead *p;
    register int bucket = 0;
    register unsigned shiftr;

    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request.  Account for space used per block
     * for accounting.
     */
#ifdef SUNOS4
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and we always allocate an extra byte.
     */
    nbytes++;
#endif

    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    shiftr = (nbytes - 1) >> 2;

    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
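    /*
     * Worked example (assuming an 8-byte aligned header and no RCHECK):
     * a request for 10 bytes becomes MEMALIGN(8 + 10 + 0) = 24, so
     * shiftr = (24 - 1) >> 2 = 5 and the loop leaves bucket = 2, i.e.
     * a 2^(2+3) = 32-byte block.
     */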
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    if ((p = (union overhead *) nextf[bucket]) == NULL) {
	child++;
#ifndef DEBUG
	stderror(ERR_NOMEM);
#else
	showall(NULL, NULL);
	xprintf(CGETS(19, 1, "nbytes=%d: Out of memory\n"), nbytes);
	abort();
#endif
	/* fool lint */
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    nmalloc[bucket]++;
#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     */
    p->ov_size = (p->ov_index <= 13) ? nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
#endif
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

#ifndef lint
/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(bucket)
    register int bucket;
{
    register union overhead *op;
    register int rnu;		/* 2^rnu bytes will be requested */
    register int nblks;		/* become nblks blocks of the desired size */
    register int siz;

    if (nextf[bucket])
	return;
    /*
     * Ensure memory is allocated on a page boundary.  Should this make a
     * getpagesize() call instead of assuming 1k pages?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;
    if ((long) op & 0x3ff) {
	memtop = (char *) sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
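    /*
     * For example (assuming the sbrk succeeds), bucket 2 gives rnu = 11:
     * 2^11 = 2048 bytes are requested and carved into
     * nblks = 1 << (11 - 5) = 64 blocks of 32 bytes each, while bucket 12
     * gives rnu = 15, i.e. a single sbrk of 32768 bytes for one block.
     */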
    memtop = (char *) sbrk(1 << rnu);	/* PWP */
    op = (union overhead *) memtop;
    /* no more room! */
    if ((long) op == -1)
	return;
    memtop += (long) (1 << rnu);
    /*
     * Round up to the minimum allocation size boundary and deduct one from
     * the block count to reflect the space lost to alignment.
     */
    if (((U_long) op) & ROUNDUP) {
	op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
	nblks--;
    }
    /*
     * Add new memory allocated to that on free list for this hash bucket.
     */
    nextf[bucket] = op;
    siz = 1 << (bucket + 3);
    while (--nblks > 0) {
	op->ov_next = (union overhead *) (((caddr_t) op) + siz);
	op = (union overhead *) (((caddr_t) op) + siz);
    }
    op->ov_next = NULL;
}

#endif

void
free(cp)
    ptr_t   cp;
{
#ifndef lint
    register int size;
    register union overhead *op;

    /*
     * the don't free flag is there so that we avoid os bugs in routines
     * that free invalid pointers!
     */
    if (cp == NULL || dont_free)
	return;
    CHECK(!memtop || !membot,
	  CGETS(19, 2, "free(%lx) called before any allocations."), cp);
    CHECK(cp > (ptr_t) memtop,
	  CGETS(19, 3, "free(%lx) above top of memory."), cp);
    CHECK(cp < (ptr_t) membot,
	  CGETS(19, 4, "free(%lx) below bottom of memory."), cp);
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
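    /*
     * The header lives MEMALIGN(sizeof(union overhead)) bytes before the
     * user pointer, mirroring the offset that malloc() added before
     * returning; the checks below only trust it if the magic byte matches.
     */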
    CHECK(op->ov_magic != MAGIC,
	  CGETS(19, 5, "free(%lx) bad block."), cp);

#ifdef RCHECK
    if (op->ov_index <= 13)
	CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
	      CGETS(19, 6, "free(%lx) bad range check."), cp);
#endif
    CHECK(op->ov_index >= NBUCKETS,
	  CGETS(19, 7, "free(%lx) bad block index."), cp);
    size = op->ov_index;
    op->ov_next = nextf[size];
    nextf[size] = op;

    nmalloc[size]--;

#else
    if (cp == NULL)
	return;
#endif
}

memalign_t
calloc(i, j)
    size_t  i, j;
{
#ifndef lint
    register char *cp, *scp;

    i *= j;
    scp = cp = (char *) xmalloc((size_t) i);
    if (i != 0)
	do
	    *cp++ = 0;
	while (--i);

    return ((memalign_t) scp);
#else
    if (i && j)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: first we make one pass through the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is declared below so it can be tuned).  If that fails we just copy
 * however many bytes were given to realloc() and hope it's not huge.
 */
#ifndef lint
/* 4 should be plenty, -1 =>'s whole list */
static int     realloc_srchlen = 4;
#endif /* lint */
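/*
 * For example, with the default realloc_srchlen of 4 the fallback pass in
 * findbucket() below looks at no more than the first 4 entries of each
 * free list; setting it to -1 scans every list in full.
 */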

memalign_t
realloc(cp, nbytes)
    ptr_t   cp;
    size_t  nbytes;
{
#ifndef lint
    register U_int onb;
    union overhead *op;
    ptr_t res;
    register int i;
    int     was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list.  First, check the
	 * most common case (last element free'd), then (this failing) the last
	 * ``realloc_srchlen'' items free'd. If all lookups fail, then assume
	 * the size of the memory block being realloc'd is the smallest
	 * possible.
	 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block */
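    /*
     * That is, the block can be reused in place when the new aligned size
     * still lands in bucket i: 2^(i+2) < onb <= 2^(i+3).  For instance a
     * block from the 64-byte bucket (i = 3) is kept as long as onb falls
     * in the range 33..64.
     */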
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
#ifdef RCHECK
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? nbytes - 1 : 0;
#endif
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new size
	 */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove((ptr_t) res, (ptr_t) cp,
		       (size_t) (onb < nbytes ? onb : nbytes));
    }
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}



#ifndef lint
/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(freep, srchlen)
    union overhead *freep;
    int     srchlen;
{
    register union overhead *p;
    register int i, j;

    for (i = 0; i < NBUCKETS; i++) {
	j = 0;
	for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
	    if (p == freep)
		return (i);
	    j++;
	}
    }
    return (-1);
}

#endif


#else				/* SYSMALLOC */

/**
 ** ``Protected versions'' of malloc, realloc, calloc, and free
 **
 ** On many systems:
 **
 ** 1. malloc(0) is bad
 ** 2. free(0) is bad
 ** 3. realloc(0, n) is bad
 ** 4. realloc(n, 0) is bad
 **
 ** Also we call our error routine if we run out of memory.
 **/
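/*
 * Put differently: smalloc(0), scalloc(0, 0) and srealloc(NULL, 0) all
 * behave as one-byte allocations, sfree(NULL) is a no-op, and a failed
 * allocation calls stderror(ERR_NOMEM) instead of returning NULL.
 */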
memalign_t
smalloc(n)
    size_t  n;
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifndef NO_SBRK
    if (membot == NULL)
	membot = (char*) sbrk(0);
#endif /* !NO_SBRK */

    if ((ptr = malloc(n)) == (ptr_t) 0) {
	child++;
	stderror(ERR_NOMEM);
    }
#ifdef NO_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = (char*) ptr;
#endif /* NO_SBRK */
    return ((memalign_t) ptr);
}

memalign_t
srealloc(p, n)
    ptr_t   p;
    size_t  n;
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifndef NO_SBRK
    if (membot == NULL)
	membot = (char*) sbrk(0);
#endif /* NO_SBRK */

    if ((ptr = (p ? realloc(p, n) : malloc(n))) == (ptr_t) 0) {
	child++;
	stderror(ERR_NOMEM);
    }
#ifdef NO_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = (char*) ptr;
#endif /* NO_SBRK */
    return ((memalign_t) ptr);
}

memalign_t
scalloc(s, n)
    size_t  s, n;
{
    char   *sptr;
    ptr_t   ptr;

    n *= s;
    n = n ? n : 1;

#ifndef NO_SBRK
    if (membot == NULL)
	membot = (char*) sbrk(0);
#endif /* NO_SBRK */

    if ((ptr = malloc(n)) == (ptr_t) 0) {
	child++;
	stderror(ERR_NOMEM);
    }

    sptr = (char *) ptr;
    if (n != 0)
	do
	    *sptr++ = 0;
	while (--n);

#ifdef NO_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = (char*) ptr;
#endif /* NO_SBRK */

    return ((memalign_t) ptr);
}

void
sfree(p)
    ptr_t   p;
{
    if (p && !dont_free)
	free(p);
}

#endif /* SYSMALLOC */

/*
 * showall - print out statistics about malloc (the historical mstats)
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs minus
 * frees for each size category.
 */
/*ARGSUSED*/
void
showall(v, c)
    Char **v;
    struct command *c;
{
#ifndef SYSMALLOC
    register int i, j;
    register union overhead *p;
    int     totfree = 0, totused = 0;

    xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
    for (i = 0; i < NBUCKETS; i++) {
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    continue;
	xprintf(" %4d", j);
	totfree += j * (1 << (i + 3));
    }
    xprintf(CGETS(19, 9, "\nused:\t"));
    for (i = 0; i < NBUCKETS; i++) {
	xprintf(" %4u", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
	    totused, totfree);
    xprintf(CGETS(19, 11,
	    "\tAllocated memory from 0x%lx to 0x%lx.  Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else
#ifndef NO_SBRK
    memtop = (char *) sbrk(0);
#endif /* !NO_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}