/*
 * tc.alloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
 * This is designed for use in a program that uses vast quantities of memory,
 * but bombs when it runs out.
 */
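/*
 * A worked reading of the sizes above (illustrative, assuming a 32-bit
 * build): each block is a power of two, 2^n bytes, with the bookkeeping
 * header carved out of its front.  Without range checking the header is
 * 4 bytes, so the usable payload is 2^n-4; with RCHECK the header grows
 * to 8 bytes and a 4-byte magic trailer follows the payload, leaving
 * 2^n-12 usable bytes.  A 64-byte block, for instance, returns at most
 * 52 usable bytes when range checking is compiled in.
 */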
/*-
 * Copyright (c) 1980, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "sh.h"
#ifdef HAVE_MALLINFO
#include <malloc.h>
#endif
#if defined(HAVE_SBRK) && !defined(__APPLE__)
#define USE_SBRK
#endif

#define RCHECK
#define DEBUG

static char   *memtop = NULL;		/* PWP: top of current memory */
static char   *membot = NULL;		/* PWP: bottom of allocatable memory */

int dont_free = 0;

#ifdef WINNT_NATIVE
# define malloc		fmalloc
# define free		ffree
# define calloc		fcalloc
# define realloc	frealloc
#endif /* WINNT_NATIVE */

#if !defined(DEBUG) || defined(SYSMALLOC)
static void
out_of_memory (void)
{
    static const char msg[] = "Out of memory\n";

    TCSH_IGNORE(write(didfds ? 2 : SHDIAG, msg, strlen(msg)));
    _exit(1);
}
#endif

#ifndef SYSMALLOC

#ifdef SX
extern void* sbrk();
#endif
/*
 * Lots of OS routines are busted and try to free invalid pointers.
 * Although our free routine is smart enough to catch bad pointers most
 * of the time, in cases where we know we are going to get a bad pointer,
 * we'd rather leak.
 */

#ifndef NULL
#define	NULL 0
#endif

typedef unsigned char U_char;	/* we don't really have signed chars */
typedef unsigned int U_int;
typedef unsigned short U_short;
typedef unsigned long U_long;


/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled and the size of the block fits
 * in two bytes, then the top two bytes hold the size of the requested block
 * plus the range checking words, and the header word MINUS ONE.
 */

#define MEMALIGN(a) (((a) + ROUNDUP) & ~ROUNDUP)

union overhead {
    union overhead *ov_next;	/* when free */
    struct {
	U_char  ovu_magic;	/* magic number */
	U_char  ovu_index;	/* bucket # */
#ifdef RCHECK
	U_short ovu_size;	/* actual block size */
	U_int   ovu_rmagic;	/* range magic number */
#endif
    }       ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_size		ovu.ovu_size
#define	ov_rmagic	ovu.ovu_rmagic
};
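
/*
 * Worked header layout (illustrative; assumes an LP64 build with RCHECK,
 * where the ovu struct packs as 1 + 1 + 2 + 4 = 8 bytes):
 *
 *   offset 0   ov_magic   0xfd once the block is handed out
 *   offset 1   ov_index   bucket number
 *   offset 2   ov_size    padded block size (header + data + trailer) - 1
 *   offset 4   ov_rmagic  0x55555555
 *
 * MEMALIGN() then pads this 8-byte header to 16 bytes before the user
 * data, and a second RMAGIC word sits just past the user area so both
 * ends of the block can be checked on free().
 */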
#define	MAGIC		0xfd	/* magic # on accounting info */
#define RMAGIC		0x55555555	/* magic # on range info */
#ifdef RCHECK
#define	RSLOP		sizeof (U_int)
#else
#define	RSLOP		0
#endif


#ifdef _LP64
#define ROUNDUP	15
#else
#define ROUNDUP	7
#endif
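
/*
 * Example of the rounding this gives MEMALIGN(): with ROUNDUP 7 (32-bit),
 * MEMALIGN(20) == ((20 + 7) & ~7) == 24 and MEMALIGN(24) stays 24; with
 * ROUNDUP 15 (_LP64), MEMALIGN(20) rounds up to 32.
 */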

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS ((sizeof(long) << 3) - 3)
static union overhead *nextf[NBUCKETS] IZERO_STRUCT;
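
/*
 * For example, with 64-bit longs NBUCKETS is (8 << 3) - 3 = 61, covering
 * blocks from 2^3 = 8 bytes (nextf[0]) up to 2^63 bytes (nextf[60]);
 * with 32-bit longs there are 29 buckets topping out at 2^31 bytes.
 */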

/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static U_int nmalloc[NBUCKETS] IZERO_STRUCT;

#ifndef lint
static	int	findbucket	(union overhead *, int);
static	void	morecore	(int);
#endif


#ifdef DEBUG
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot);	\
	abort(); \
    }
#else
# define CHECK(a, str, p) \
    if (a) { \
	xprintf(str, p);	\
	xprintf(" (memtop = %p membot = %p)\n", memtop, membot);	\
	return; \
    }
#endif

memalign_t
malloc(size_t nbytes)
{
#ifndef lint
    union overhead *p;
    int bucket = 0;
    unsigned shiftr;

    /*
     * Convert the requested size into the closest block size kept in the
     * hash buckets that satisfies the request, accounting for the
     * bookkeeping space used in each block.
     */
#ifdef SUNOS4
    /*
     * SunOS localtime() overwrites the 9th byte on an 8 byte malloc()....
     * so we get one more...
     * From Michael Schroeder: This is not true. It depends on the
     * timezone string. In Europe it can overwrite the 13th byte on a
     * 12 byte malloc.
     * So we punt and always allocate an extra byte.
     */
    nbytes++;
#endif

    nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead)) + nbytes + RSLOP);
    shiftr = (nbytes - 1) >> 2;

    /* apart from this loop, this is O(1) */
    while ((shiftr >>= 1) != 0)
	bucket++;
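    /*
     * Worked example (illustrative; assumes LP64 with RCHECK, i.e. a
     * 16-byte aligned header plus a 4-byte trailer): malloc(100) computes
     * nbytes = MEMALIGN(16 + 100 + 4) = 128 and shiftr = 127 >> 2 = 31;
     * the loop shifts 31 -> 15 -> 7 -> 3 -> 1 -> 0, leaving bucket = 4,
     * i.e. the 1 << (4 + 3) = 128-byte size class.
     */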
    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */
    if (nextf[bucket] == NULL)
	morecore(bucket);
    if ((p = nextf[bucket]) == NULL) {
	child++;
#ifndef DEBUG
	out_of_memory();
#else
	showall(NULL, NULL);
	xprintf(CGETS(19, 1, "nbytes=%zu: Out of memory\n"), nbytes);
	abort();
#endif
	/* fool lint */
	return ((memalign_t) 0);
    }
    /* remove from linked list */
    nextf[bucket] = nextf[bucket]->ov_next;
    p->ov_magic = MAGIC;
    p->ov_index = bucket;
    nmalloc[bucket]++;
#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     */
    p->ov_size = (p->ov_index <= 13) ? (U_short)nbytes - 1 : 0;
    p->ov_rmagic = RMAGIC;
    *((U_int *) (((caddr_t) p) + nbytes - RSLOP)) = RMAGIC;
#endif
    return ((memalign_t) (((caddr_t) p) + MEMALIGN(sizeof(union overhead))));
#else
    if (nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}

#ifndef lint
/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(int bucket)
{
    union overhead *op;
    int rnu;		/* 2^rnu bytes will be requested */
    int nblks;		/* become nblks blocks of the desired size */
    int siz;

    if (nextf[bucket])
	return;
    /*
     * Ensure memory is allocated on a page boundary.  Should we make a
     * getpagesize() call?
     */
    op = (union overhead *) sbrk(0);
    memtop = (char *) op;
    if (membot == NULL)
	membot = memtop;
    if ((long) op & 0x3ff) {
	memtop = sbrk((int) (1024 - ((long) op & 0x3ff)));
	memtop += (long) (1024 - ((long) op & 0x3ff));
    }

    /* take 2k unless the block is bigger than that */
    rnu = (bucket <= 8) ? 11 : bucket + 3;
    nblks = 1 << (rnu - (bucket + 3));	/* how many blocks to get */
    memtop = sbrk(1 << rnu);	/* PWP */
    op = (union overhead *) memtop;
    /* no more room! */
    if ((long) op == -1)
	return;
    memtop += (long) (1 << rnu);
    /*
     * Round up to the minimum allocation size boundary and deduct from the
     * block count to reflect the space lost.
     */
    if (((U_long) op) & ROUNDUP) {
	op = (union overhead *) (((U_long) op + (ROUNDUP + 1)) & ~ROUNDUP);
	nblks--;
    }
    /*
     * Add new memory allocated to that on free list for this hash bucket.
     */
    nextf[bucket] = op;
    siz = 1 << (bucket + 3);
    while (--nblks > 0) {
	op->ov_next = (union overhead *) (((caddr_t) op) + siz);
	op = (union overhead *) (((caddr_t) op) + siz);
    }
    op->ov_next = NULL;
}
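
/*
 * Worked example of the carving above (illustrative): morecore(2) has
 * bucket <= 8, so it asks sbrk() for 1 << 11 = 2048 bytes and chains
 * nblks = 1 << (11 - 5) = 64 blocks of 1 << 5 = 32 bytes onto nextf[2],
 * one block fewer if the break had to be realigned first.
 */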

#endif

void
free(ptr_t cp)
{
#ifndef lint
    int size;
    union overhead *op;

    /*
     * The dont_free flag is there so that we avoid OS bugs in routines
     * that free invalid pointers!
     */
    if (cp == NULL || dont_free)
	return;
    CHECK(!memtop || !membot,
	  CGETS(19, 2, "free(%p) called before any allocations."), cp);
    CHECK(cp > (ptr_t) memtop,
	  CGETS(19, 3, "free(%p) above top of memory."), cp);
    CHECK(cp < (ptr_t) membot,
	  CGETS(19, 4, "free(%p) below bottom of memory."), cp);
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    CHECK(op->ov_magic != MAGIC,
	  CGETS(19, 5, "free(%p) bad block."), cp);

#ifdef RCHECK
    if (op->ov_index <= 13)
	CHECK(*(U_int *) ((caddr_t) op + op->ov_size + 1 - RSLOP) != RMAGIC,
	      CGETS(19, 6, "free(%p) bad range check."), cp);
#endif
    CHECK(op->ov_index >= NBUCKETS,
	  CGETS(19, 7, "free(%p) bad block index."), cp);
    size = op->ov_index;
    op->ov_next = nextf[size];
    nextf[size] = op;

    nmalloc[size]--;

#else
    if (cp == NULL)
	return;
#endif
}

memalign_t
calloc(size_t i, size_t j)
{
#ifndef lint
    char *cp;
    volatile size_t k;

    i *= j;
    cp = xmalloc(i);
    /* Stop gcc 5.x from optimizing malloc+memset = calloc */
    k = i;
    memset(cp, 0, k);

    return ((memalign_t) cp);
#else
    if (i && j)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif
}

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes were given to realloc() and hope it's not huge.
 */
#ifndef lint
/* 4 should be plenty, -1 =>'s whole list */
static int     realloc_srchlen = 4;
#endif /* lint */

memalign_t
realloc(ptr_t cp, size_t nbytes)
{
#ifndef lint
    U_int onb;
    union overhead *op;
    ptr_t res;
    int i;
    int     was_alloced = 0;

    if (cp == NULL)
	return (malloc(nbytes));
    op = (union overhead *) (((caddr_t) cp) - MEMALIGN(sizeof(union overhead)));
    if (op->ov_magic == MAGIC) {
	was_alloced++;
	i = op->ov_index;
    }
    else
	/*
	 * Already free, doing "compaction".
	 *
	 * Search for the old block of memory on the free list.  First,
	 * check the most common case (last element free'd), then (failing
	 * that) the last ``realloc_srchlen'' items free'd.  If all lookups
	 * fail, then assume the size of the memory block being realloc'd
	 * is the smallest possible.
	 */
	if ((i = findbucket(op, 1)) < 0 &&
	    (i = findbucket(op, realloc_srchlen)) < 0)
	    i = 0;

    onb = MEMALIGN(nbytes + MEMALIGN(sizeof(union overhead)) + RSLOP);

    /* avoid the copy if same size block */
    if (was_alloced && (onb <= (U_int) (1 << (i + 3))) &&
	(onb > (U_int) (1 << (i + 2)))) {
#ifdef RCHECK
	/* JMR: formerly this wasn't updated ! */
	nbytes = MEMALIGN(MEMALIGN(sizeof(union overhead))+nbytes+RSLOP);
	*((U_int *) (((caddr_t) op) + nbytes - RSLOP)) = RMAGIC;
	op->ov_rmagic = RMAGIC;
	op->ov_size = (op->ov_index <= 13) ? (U_short)nbytes - 1 : 0;
#endif
	return ((memalign_t) cp);
    }
    if ((res = malloc(nbytes)) == NULL)
	return ((memalign_t) NULL);
    if (cp != res) {		/* common optimization */
	/*
	 * christos: this used to copy nbytes! It should copy the
	 * smaller of the old and new sizes.
	 */
	onb = (1 << (i + 3)) - MEMALIGN(sizeof(union overhead)) - RSLOP;
	(void) memmove(res, cp, onb < nbytes ? onb : nbytes);
    }
    if (was_alloced)
	free(cp);
    return ((memalign_t) res);
#else
    if (cp && nbytes)
	return ((memalign_t) 0);
    else
	return ((memalign_t) 0);
#endif /* !lint */
}
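
/*
 * Worked example of the in-place path above (illustrative; LP64 with
 * RCHECK): a pointer from malloc(100) sits in bucket 4 (128 bytes).
 * realloc(p, 90) computes onb = MEMALIGN(90 + 16 + 4) = 112, which is
 * <= 128 and > 64, so the same block is kept and only the RCHECK
 * trailer is rewritten; realloc(p, 120) gives onb = 144 > 128 and takes
 * the malloc-copy-free path instead.
 */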

/*
 * On Linux, _nss_nis_setnetgrent() calls this function to determine
 * the usable size of the pointer passed, but this is not a portable
 * API, so we cannot use our malloc replacement without providing one.
 * Thanks a lot glibc!
 */
#ifdef __linux__
#define M_U_S_CONST
#else
#define M_U_S_CONST const
#endif
size_t malloc_usable_size(M_U_S_CONST void *);
size_t
malloc_usable_size(M_U_S_CONST void *ptr)
{
    const union overhead *op = (const union overhead *)
	(((const char *) ptr) - MEMALIGN(sizeof(*op)));
    if (op->ov_magic == MAGIC)
	    return 1 << (op->ov_index + 3);
    else
	    return 0;
}
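
/*
 * For example (illustrative): a live pointer whose header says
 * ov_index == 4 reports 1 << (4 + 3) = 128 here, the whole block
 * including header and trailer space rather than just the payload,
 * while a pointer without a MAGIC header reports 0.
 */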


#ifndef lint
/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(union overhead *freep, int srchlen)
{
    union overhead *p;
    size_t i;
    int j;

    for (i = 0; i < NBUCKETS; i++) {
	j = 0;
	for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
	    if (p == freep)
		return (i);
	    j++;
	}
    }
    return (-1);
}

#endif


#else				/* SYSMALLOC */

/**
 ** ``Protected versions'' of malloc, realloc, calloc, and free
 **
 ** On many systems:
 **
 ** 1. malloc(0) is bad
 ** 2. free(0) is bad
 ** 3. realloc(0, n) is bad
 ** 4. realloc(n, 0) is bad
 **
 ** Also we call our error routine if we run out of memory.
 **/
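/*
 * A minimal usage sketch of the guarded entry points (illustrative):
 *
 *     ptr_t buf = (ptr_t) smalloc(0);   -- treated as smalloc(1)
 *     buf = (ptr_t) srealloc(buf, 0);   -- treated as srealloc(buf, 1)
 *     sfree(NULL);                      -- harmless no-op
 *
 * Each call either succeeds or exits via out_of_memory(), so none of
 * the "bad" cases 1-4 above ever reach the underlying C library.
 */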
memalign_t
smalloc(size_t n)
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifdef USE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* USE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();
#ifndef USE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !USE_SBRK */
    return ((memalign_t) ptr);
}

memalign_t
srealloc(ptr_t p, size_t n)
{
    ptr_t   ptr;

    n = n ? n : 1;

#ifdef USE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* USE_SBRK */

    if ((ptr = (p ? realloc(p, n) : malloc(n))) == NULL)
	out_of_memory();
#ifndef USE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !USE_SBRK */
    return ((memalign_t) ptr);
}

memalign_t
scalloc(size_t s, size_t n)
{
    ptr_t   ptr;

    n *= s;
    n = n ? n : 1;

#ifdef USE_SBRK
    if (membot == NULL)
	membot = sbrk(0);
#endif /* USE_SBRK */

    if ((ptr = malloc(n)) == NULL)
	out_of_memory();

    memset (ptr, 0, n);

#ifndef USE_SBRK
    if (memtop < ((char *) ptr) + n)
	memtop = ((char *) ptr) + n;
    if (membot == NULL)
	membot = ptr;
#endif /* !USE_SBRK */

    return ((memalign_t) ptr);
}

void
sfree(ptr_t p)
{
    if (p && !dont_free)
	free(p);
}

#endif /* SYSMALLOC */

/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
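/*
 * Hypothetical excerpt of the output (the counts are invented for
 * illustration):
 *
 *     tcsh current memory allocation:
 *     free:       3   11    1    0    0 ...
 *     used:      52  130   17    4    1 ...
 *
 * Column i describes the 2^(i+3)-byte bucket, so the second column here
 * would mean 11 blocks on the 16-byte free list and 130 outstanding.
 */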
/*ARGSUSED*/
void
showall(Char **v, struct command *c)
{
#ifndef SYSMALLOC
    size_t i, j;
    union overhead *p;
    int     totfree = 0, totused = 0;

    xprintf(CGETS(19, 8, "%s current memory allocation:\nfree:\t"), progname);
    for (i = 0; i < NBUCKETS; i++) {
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
	    continue;
	xprintf(" %4zd", j);
	totfree += j * (1 << (i + 3));
    }
    xprintf("\n%s:\t", CGETS(19, 9, "used"));
    for (i = 0; i < NBUCKETS; i++) {
	xprintf(" %4d", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    xprintf(CGETS(19, 10, "\n\tTotal in use: %d, total free: %d\n"),
	    totused, totfree);
    xprintf(CGETS(19, 11,
	    "\tAllocated memory from 0x%lx to 0x%lx.  Real top at 0x%lx\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) sbrk(0));
#else /* SYSMALLOC */
#ifndef HAVE_MALLINFO
#ifdef USE_SBRK
    memtop = sbrk(0);
#endif /* USE_SBRK */
    xprintf(CGETS(19, 12, "Allocated memory from 0x%lx to 0x%lx (%ld).\n"),
	    (unsigned long) membot, (unsigned long) memtop,
	    (unsigned long) (memtop - membot));
#else /* HAVE_MALLINFO */
    struct mallinfo mi;

    mi = mallinfo();
    xprintf(CGETS(19, 13, "%s current memory allocation:\n"), progname);
    xprintf(CGETS(19, 14, "Total space allocated from system: %d\n"), mi.arena);
    xprintf(CGETS(19, 15, "Number of non-inuse chunks: %d\n"), mi.ordblks);
    xprintf(CGETS(19, 16, "Number of mmapped regions: %d\n"), mi.hblks);
    xprintf(CGETS(19, 17, "Total space in mmapped regions: %d\n"), mi.hblkhd);
    xprintf(CGETS(19, 18, "Total allocated space: %d\n"), mi.uordblks);
    xprintf(CGETS(19, 19, "Total non-inuse space: %d\n"), mi.fordblks);
    xprintf(CGETS(19, 20, "Top-most, releasable space: %d\n"), mi.keepcost);
#endif /* HAVE_MALLINFO */
#endif /* SYSMALLOC */
    USE(c);
    USE(v);
}