1/*
2 * mem.c - memory management
3 *
4 * This file is part of zsh, the Z shell.
5 *
6 * Copyright (c) 1992-1997 Paul Falstad
7 * All rights reserved.
8 *
9 * Permission is hereby granted, without written agreement and without
10 * license or royalty fees, to use, copy, modify, and distribute this
11 * software and to distribute modified versions of this software for any
12 * purpose, provided that the above copyright notice and the following
13 * two paragraphs appear in all copies of this software.
14 *
15 * In no event shall Paul Falstad or the Zsh Development Group be liable
16 * to any party for direct, indirect, special, incidental, or consequential
17 * damages arising out of the use of this software and its documentation,
18 * even if Paul Falstad and the Zsh Development Group have been advised of
19 * the possibility of such damage.
20 *
21 * Paul Falstad and the Zsh Development Group specifically disclaim any
22 * warranties, including, but not limited to, the implied warranties of
23 * merchantability and fitness for a particular purpose.  The software
24 * provided hereunder is on an "as is" basis, and Paul Falstad and the
25 * Zsh Development Group have no obligation to provide maintenance,
26 * support, updates, enhancements, or modifications.
27 *
28 */
29
30#include "zsh.mdh"
31#include "mem.pro"
32
33/*
34	There are two ways to allocate memory in zsh.  The first way is
35	to call zalloc/zshcalloc, which call malloc/calloc directly.  It
36	is legal to call realloc() or free() on memory allocated this way.
	The second way is to call zhalloc/hcalloc, which allocates memory
	from one of the memory pools on the heap stack.  Such memory pools
	are created automatically when the heap allocation routines are
	called.  To be sure that they are freed at appropriate times
41	one should call pushheap() before one starts using heaps and
42	popheap() after that (when the memory allocated on the heaps since
43	the last pushheap() isn't needed anymore).
44	pushheap() saves the states of all currently allocated heaps and
45	popheap() resets them to the last state saved and destroys the
46	information about that state.  If you called pushheap() and
47	allocated some memory on the heaps and then come to a place where
48	you don't need the allocated memory anymore but you still want
49	to allocate memory on the heap, you should call freeheap().  This
50	works like popheap(), only that it doesn't free the information
51	about the heap states (i.e. the heaps are like after the call to
52	pushheap() and you have to call popheap some time later).
53
54	Memory allocated in this way does not have to be freed explicitly;
55	it will all be freed when the pool is destroyed.  In fact,
56	attempting to free this memory may result in a core dump.
57
58	If possible, the heaps are allocated using mmap() so that the
59	(*real*) heap isn't filled up with empty zsh heaps. If mmap()
60	is not available and zsh's own allocator is used, we use a simple trick
61	to avoid that: we allocate a large block of memory before allocating
62	a heap pool, this memory is freed again immediately after the pool
63	is allocated. If there are only small blocks on the free list this
64	guarantees that the memory for the pool is at the end of the memory
65	which means that we can give it back to the system when the pool is
66	freed.
67
68	hrealloc(char *p, size_t old, size_t new) is an optimisation
69	with a similar interface to realloc().  Typically the new size
70	will be larger than the old one, since there is no gain in
	shrinking the allocation (indeed, that will confuse hrealloc()
72	since it will forget that the unused space once belonged to this
73	pointer).  However, new == 0 is a special case; then if we
74	had to allocate a special heap for this memory it is freed at
75	that point.
76*/
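/*
	As a quick orientation, here is an illustrative sketch only (not
	code used anywhere in zsh) of a typical consumer of the heap
	routines described above; the function and variable names are
	invented for the example.
*/
#if 0
static void
example_consumer(char **words, int nwords)
{
    int i;

    pushheap();			/* save the state of all current heaps */
    for (i = 0; i < nwords; i++) {
	/* heap memory needs no explicit free() */
	char *copy = dupstring(words[i]);
	(void)copy;		/* ... use the copy ... */
	freeheap();		/* recycle everything since pushheap() */
    }
    popheap();			/* restore the state saved by pushheap() */
}
#endif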
77
78#if defined(HAVE_SYS_MMAN_H) && defined(HAVE_MMAP) && defined(HAVE_MUNMAP)
79
80#include <sys/mman.h>
81
82#if defined(MAP_ANONYMOUS) && defined(MAP_PRIVATE)
83
84#define USE_MMAP 1
85#define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE)
86
87#endif
88#endif
89
90#ifdef ZSH_MEM_WARNING
91# ifndef DEBUG
92#  define DEBUG 1
93# endif
94#endif
95
96#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
97
98static int h_m[1025], h_push, h_pop, h_free;
99
100#endif
101
102/* Make sure we align to the longest fundamental type. */
103union mem_align {
104    zlong l;
105    double d;
106};
107
108#define H_ISIZE  sizeof(union mem_align)
109#define HEAPSIZE (16384 - H_ISIZE)
110/* Memory available for user data in default arena size */
111#define HEAP_ARENA_SIZE (HEAPSIZE - sizeof(struct heap))
112#define HEAPFREE (16384 - H_ISIZE)
113
114/* Memory available for user data in heap h */
115#define ARENA_SIZEOF(h) ((h)->size - sizeof(struct heap))
116
117/* list of zsh heaps */
118
119static Heap heaps;
120
/* a heap with free space; this hint is not always accurate (it will be the
 * last heap if that one was newly allocated, but it may also be another one) */
123
124static Heap fheap;
125
126/**/
127#ifdef ZSH_HEAP_DEBUG
128/*
129 * The heap ID we'll allocate next.
130 *
131 * We'll avoid using 0 as that means zero-initialised memory
132 * containing a heap ID is (correctly) marked as invalid.
133 */
134static Heapid next_heap_id = (Heapid)1;
135
136/*
137 * The ID of the heap from which we last allocated heap memory.
138 * In theory, since we carefully avoid allocating heap memory during
139 * interrupts, after any call to zhalloc() or wrappers this should
140 * be the ID of the heap containing the memory just returned.
141 */
142/**/
143mod_export Heapid last_heap_id;
144
145/*
146 * Stack of heaps saved by new_heaps().
147 * Assumes old_heaps() will come along and restore it later
148 * (outputs an error if old_heaps() is called out of sequence).
149 */
150LinkList heaps_saved;
151
152/*
153 * Debugging verbosity.  This must be set from a debugger.
154 * An 'or' of bits from the enum heap_debug_verbosity.
155 */
156volatile int heap_debug_verbosity;
157
158/*
159 * Generate a heap identifier that's unique up to unsigned integer wrap.
160 *
161 * For the purposes of debugging we won't bother trying to make a
162 * heap_id globally unique, which would require checking all existing
163 * heaps every time we create an ID and still wouldn't do what we
164 * ideally want, which is to make sure the IDs of valid heaps are
165 * different from the IDs of no-longer-valid heaps.  Given that,
166 * we'll just assume that if we haven't tracked the problem when the
167 * ID wraps we're out of luck.  We could change the type to a long long
 * if we wanted more room.
169 */
170
171static Heapid
172new_heap_id(void)
173{
174    return next_heap_id++;
175}
176
177/**/
178#endif
179
180/* Use new heaps from now on. This returns the old heap-list. */
181
182/**/
183mod_export Heap
184new_heaps(void)
185{
186    Heap h;
187
188    queue_signals();
189    h = heaps;
190
191    fheap = heaps = NULL;
192    unqueue_signals();
193
194#ifdef ZSH_HEAP_DEBUG
195    if (heap_debug_verbosity & HDV_NEW) {
196	fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
197		" saved, new heaps created.\n", h->heap_id);
198    }
199    if (!heaps_saved)
200	heaps_saved = znewlinklist();
201    zpushnode(heaps_saved, h);
202#endif
203    return h;
204}
205
206/* Re-install the old heaps again, freeing the new ones. */
207
208/**/
209mod_export void
210old_heaps(Heap old)
211{
212    Heap h, n;
213
214    queue_signals();
215    for (h = heaps; h; h = n) {
216	n = h->next;
217	DPUTS(h->sp, "BUG: old_heaps() with pushed heaps");
218#ifdef ZSH_HEAP_DEBUG
219	if (heap_debug_verbosity & HDV_FREE) {
220	    fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
		    " freed in old_heaps().\n", h->heap_id);
222	}
223#endif
224#ifdef USE_MMAP
225	munmap((void *) h, h->size);
226#else
227	zfree(h, HEAPSIZE);
228#endif
229    }
230    heaps = old;
231#ifdef ZSH_HEAP_DEBUG
232    if (heap_debug_verbosity & HDV_OLD) {
233	fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
		" restored.\n", heaps->heap_id);
235    }
236    {
237	Heap myold = heaps_saved ? getlinknode(heaps_saved) : NULL;
238	if (old != myold)
239	{
240	    fprintf(stderr, "HEAP DEBUG: invalid old heap " HEAPID_FMT
241		    ", expecting " HEAPID_FMT ".\n", old->heap_id,
242		    myold->heap_id);
243	}
244    }
245#endif
246    fheap = NULL;
247    unqueue_signals();
248}
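
/*
 * Illustrative sketch only (not part of zsh): new_heaps()/old_heaps()
 * give a caller a completely fresh heap list and later restore the
 * previous one, freeing everything allocated in between.  The function
 * name below is invented.
 */
#if 0
static void
do_isolated_work(void)
{
    Heap old = new_heaps();		/* save current heaps, start empty */
    char *scratch = (char *) zhalloc(256);

    (void)scratch;			/* work that must not leak into the
					 * caller's heaps */
    old_heaps(old);			/* free temporary heaps, restore old */
}
#endif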
249
250/* Temporarily switch to other heaps (or back again). */
251
252/**/
253mod_export Heap
254switch_heaps(Heap new)
255{
256    Heap h;
257
258    queue_signals();
259    h = heaps;
260
261#ifdef ZSH_HEAP_DEBUG
262    if (heap_debug_verbosity & HDV_SWITCH) {
263	fprintf(stderr, "HEAP DEBUG: heap temporarily switched from "
264		HEAPID_FMT " to " HEAPID_FMT ".\n", h->heap_id, new->heap_id);
265    }
266#endif
267    heaps = new;
268    fheap = NULL;
269    unqueue_signals();
270
271    return h;
272}
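
/*
 * Illustrative sketch only (not part of zsh): switch_heaps() just
 * exchanges the active heap list without freeing anything, so a caller
 * is expected to switch back afterwards.  Names are invented.
 */
#if 0
static void
allocate_on_other_heaps(Heap other)
{
    Heap keep = switch_heaps(other);	/* allocate on "other" for a while */
    char *p = (char *) zhalloc(64);

    (void)p;
    switch_heaps(keep);			/* return to the original heaps */
}
#endif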
273
274/* save states of zsh heaps */
275
276/**/
277mod_export void
278pushheap(void)
279{
280    Heap h;
281    Heapstack hs;
282
283    queue_signals();
284
285#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
286    h_push++;
287#endif
288
289    for (h = heaps; h; h = h->next) {
290	DPUTS(!h->used, "BUG: empty heap");
291	hs = (Heapstack) zalloc(sizeof(*hs));
292	hs->next = h->sp;
293	h->sp = hs;
294	hs->used = h->used;
295#ifdef ZSH_HEAP_DEBUG
296	hs->heap_id = h->heap_id;
297	h->heap_id = new_heap_id();
298	if (heap_debug_verbosity & HDV_PUSH) {
299	    fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT " pushed, new id is "
300		    HEAPID_FMT ".\n",
301		    hs->heap_id, h->heap_id);
302	}
303#endif
304    }
305    unqueue_signals();
306}
307
308/* reset heaps to previous state */
309
310/**/
311mod_export void
312freeheap(void)
313{
314    Heap h, hn, hl = NULL;
315
316    queue_signals();
317
318#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
319    h_free++;
320#endif
321
322    /* At this point we used to do:
323    fheap = NULL;
324     *
325     * When pushheap() is called, it sweeps over the entire heaps list of
326     * arenas and marks every one of them with the amount of free space in
327     * that arena at that moment.  zhalloc() is then allowed to grab bits
328     * out of any of those arenas that have free space.
329     *
330     * With the above reset of fheap, the loop below sweeps back over the
331     * entire heap list again, resetting the free space in every arena to
332     * the amount stashed by pushheap() and finding the first arena with
333     * free space to optimize zhalloc()'s next search.  When there's a lot
334     * of stuff already on the heap, this is an enormous amount of work,
335     * and performance goes to hell.
336     *
337     * However, there doesn't seem to be any reason to reset fheap before
338     * beginning this loop.  Either it's already correct, or it has never
339     * been set and this loop will do it, or it'll be reset from scratch
340     * on the next popheap().  So all that's needed here is to pick up
341     * the scan wherever the last pass [or the last popheap()] left off.
342     */
343    for (h = (fheap ? fheap : heaps); h; h = hn) {
344	hn = h->next;
345	if (h->sp) {
346#ifdef ZSH_MEM_DEBUG
347	    memset(arena(h) + h->sp->used, 0xff, h->used - h->sp->used);
348#endif
349	    h->used = h->sp->used;
350	    if (!fheap && h->used < ARENA_SIZEOF(h))
351		fheap = h;
352	    hl = h;
353#ifdef ZSH_HEAP_DEBUG
354	    /*
355	     * As the free makes the heap invalid, give it a new
356	     * identifier.  We're not popping it, so don't use
357	     * the one in the heap stack.
358	     */
359	    {
360		Heapid new_id = new_heap_id();
361		if (heap_debug_verbosity & HDV_FREE) {
362		    fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
363			    " freed, new id is " HEAPID_FMT ".\n",
364			    h->heap_id, new_id);
365		}
366		h->heap_id = new_id;
367	    }
368#endif
369	} else {
370#ifdef USE_MMAP
371	    munmap((void *) h, h->size);
372#else
373	    zfree(h, HEAPSIZE);
374#endif
375	}
376    }
377    if (hl)
378	hl->next = NULL;
379    else
380	heaps = fheap = NULL;
381
382    unqueue_signals();
383}
384
385/* reset heap to previous state and destroy state information */
386
387/**/
388mod_export void
389popheap(void)
390{
391    Heap h, hn, hl = NULL;
392    Heapstack hs;
393
394    queue_signals();
395
396#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
397    h_pop++;
398#endif
399
400    fheap = NULL;
401    for (h = heaps; h; h = hn) {
402	hn = h->next;
403	if ((hs = h->sp)) {
404	    h->sp = hs->next;
405#ifdef ZSH_MEM_DEBUG
406	    memset(arena(h) + hs->used, 0xff, h->used - hs->used);
407#endif
408	    h->used = hs->used;
409#ifdef ZSH_HEAP_DEBUG
410	    if (heap_debug_verbosity & HDV_POP) {
411		fprintf(stderr, "HEAP DEBUG: heap " HEAPID_FMT
412			" popped, old heap was " HEAPID_FMT ".\n",
413			h->heap_id, hs->heap_id);
414	    }
415	    h->heap_id = hs->heap_id;
416#endif
417	    if (!fheap && h->used < ARENA_SIZEOF(h))
418		fheap = h;
419	    zfree(hs, sizeof(*hs));
420
421	    hl = h;
422	} else {
423#ifdef USE_MMAP
424	    munmap((void *) h, h->size);
425#else
426	    zfree(h, HEAPSIZE);
427#endif
428	}
429    }
430    if (hl)
431	hl->next = NULL;
432    else
433	heaps = NULL;
434
435    unqueue_signals();
436}
437
438#ifdef USE_MMAP
439/*
440 * Utility function to allocate a heap area of at least *n bytes.
441 * *n will be rounded up to the next page boundary.
442 */
443static Heap
444mmap_heap_alloc(size_t *n)
445{
446    Heap h;
447    static size_t pgsz = 0;
448
449    if (!pgsz) {
450
451#ifdef _SC_PAGESIZE
452	pgsz = sysconf(_SC_PAGESIZE);     /* SVR4 */
453#else
454# ifdef _SC_PAGE_SIZE
455	pgsz = sysconf(_SC_PAGE_SIZE);    /* HPUX */
456# else
457	pgsz = getpagesize();
458# endif
459#endif
460
461	pgsz--;
462    }
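    /* Round the request up to a whole number of pages: pgsz now holds
     * pagesize - 1, so with 4096-byte pages a request for 5000 bytes
     * becomes 8192 below. */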
463    *n = (*n + pgsz) & ~pgsz;
464    h = (Heap) mmap(NULL, *n, PROT_READ | PROT_WRITE,
465		    MMAP_FLAGS, -1, 0);
466    if (h == ((Heap) -1)) {
467	zerr("fatal error: out of heap memory");
468	exit(1);
469    }
470
471    return h;
472}
473#endif
474
475/* check whether a pointer is within a memory pool */
476
477/**/
478mod_export void *
479zheapptr(void *p)
480{
481    Heap h;
482    queue_signals();
483    for (h = heaps; h; h = h->next)
484	if ((char *)p >= arena(h) &&
485	    (char *)p + H_ISIZE < arena(h) + ARENA_SIZEOF(h))
486	    break;
487    unqueue_signals();
488    return (h ? p : 0);
489}
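
/*
 * Illustrative sketch only (not part of zsh): zheapptr() lets code that
 * may be handed either heap or permanently allocated memory decide
 * whether an explicit free is needed.  The function name is invented.
 */
#if 0
static void
free_if_permanent(char *p)
{
    if (p && !zheapptr(p))
	zsfree(p);	/* permanent memory must be freed explicitly */
    /* heap memory is reclaimed by freeheap()/popheap() instead */
}
#endif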
490
491/* allocate memory from the current memory pool */
492
493/**/
494mod_export void *
495zhalloc(size_t size)
496{
497    Heap h;
498    size_t n;
499
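    /* Round the request up to a multiple of H_ISIZE; e.g. with
     * H_ISIZE == 8 a request for 10 bytes becomes 16. */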
500    size = (size + H_ISIZE - 1) & ~(H_ISIZE - 1);
501
502    queue_signals();
503
504#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
505    h_m[size < (1024 * H_ISIZE) ? (size / H_ISIZE) : 1024]++;
506#endif
507
508    /* find a heap with enough free space */
509
510    for (h = ((fheap && ARENA_SIZEOF(fheap) >= (size + fheap->used))
511	      ? fheap : heaps);
512	 h; h = h->next) {
513	if (ARENA_SIZEOF(h) >= (n = size + h->used)) {
514	    void *ret;
515
516	    h->used = n;
517	    ret = arena(h) + n - size;
518	    unqueue_signals();
519#ifdef ZSH_HEAP_DEBUG
520	    last_heap_id = h->heap_id;
521	    if (heap_debug_verbosity & HDV_ALLOC) {
522		fprintf(stderr, "HEAP DEBUG: allocated memory from heap "
523			HEAPID_FMT ".\n", h->heap_id);
524	    }
525#endif
526	    return ret;
527	}
528    }
529    {
530	Heap hp;
531        /* not found, allocate new heap */
532#if defined(ZSH_MEM) && !defined(USE_MMAP)
533	static int called = 0;
534	void *foo = called ? (void *)malloc(HEAPFREE) : NULL;
535            /* tricky, see above */
536#endif
537
538	n = HEAP_ARENA_SIZE > size ? HEAPSIZE : size + sizeof(*h);
539	for (hp = NULL, h = heaps; h; hp = h, h = h->next);
540
541#ifdef USE_MMAP
542	h = mmap_heap_alloc(&n);
543#else
544	h = (Heap) zalloc(n);
545#endif
546
547#if defined(ZSH_MEM) && !defined(USE_MMAP)
548	if (called)
549	    zfree(foo, HEAPFREE);
550	called = 1;
551#endif
552
553	h->size = n;
554	h->used = size;
555	h->next = NULL;
556	h->sp = NULL;
557#ifdef ZSH_HEAP_DEBUG
558	h->heap_id = new_heap_id();
559	if (heap_debug_verbosity & HDV_CREATE) {
560	    fprintf(stderr, "HEAP DEBUG: create new heap " HEAPID_FMT ".\n",
561		    h->heap_id);
562	}
563#endif
564
565	if (hp)
566	    hp->next = h;
567	else
568	    heaps = h;
569	fheap = h;
570
571	unqueue_signals();
572#ifdef ZSH_HEAP_DEBUG
573	last_heap_id = h->heap_id;
574	if (heap_debug_verbosity & HDV_ALLOC) {
575	    fprintf(stderr, "HEAP DEBUG: allocated memory from heap "
576		    HEAPID_FMT ".\n", h->heap_id);
577	}
578#endif
579	return arena(h);
580    }
581}
582
583/**/
584mod_export void *
585hrealloc(char *p, size_t old, size_t new)
586{
587    Heap h, ph;
588
589    old = (old + H_ISIZE - 1) & ~(H_ISIZE - 1);
590    new = (new + H_ISIZE - 1) & ~(H_ISIZE - 1);
591
592    if (old == new)
593	return p;
594    if (!old && !p)
595	return zhalloc(new);
596
597    /* find the heap with p */
598
599    queue_signals();
600    for (h = heaps, ph = NULL; h; ph = h, h = h->next)
601	if (p >= arena(h) && p < arena(h) + ARENA_SIZEOF(h))
602	    break;
603
604    DPUTS(!h, "BUG: hrealloc() called for non-heap memory.");
605    DPUTS(h->sp && arena(h) + h->sp->used > p,
606	  "BUG: hrealloc() wants to realloc pushed memory");
607
608    /*
609     * If the end of the old chunk is before the used pointer,
610     * more memory has been zhalloc'ed afterwards.
611     * We can't tell if that's still in use, obviously, since
612     * that's the whole point of heap memory.
613     * We have no choice other than to grab some more memory
614     * somewhere else and copy in the old stuff.
615     */
616    if (p + old < arena(h) + h->used) {
617	if (new > old) {
618	    char *ptr = (char *) zhalloc(new);
619	    memcpy(ptr, p, old);
620#ifdef ZSH_MEM_DEBUG
621	    memset(p, 0xff, old);
622#endif
623	    unqueue_signals();
624	    return ptr;
625	} else {
626	    unqueue_signals();
627	    return new ? p : NULL;
628	}
629    }
630
631    DPUTS(p + old != arena(h) + h->used, "BUG: hrealloc more than allocated");
632
633    /*
634     * We now know there's nothing afterwards in the heap, now see if
635     * there's nothing before.  Then we can reallocate the whole thing.
636     * Otherwise, we need to keep the stuff at the start of the heap,
637     * then allocate a new one too; this is handled below.  (This will
638     * guarantee we occupy a full heap next time round, provided we
639     * don't use the heap for anything else.)
640     */
641    if (p == arena(h)) {
642#ifdef ZSH_HEAP_DEBUG
643	Heapid heap_id = h->heap_id;
644#endif
645	/*
646	 * Zero new seems to be a special case saying we've finished
647	 * with the specially reallocated memory, see scanner() in glob.c.
648	 */
649	if (!new) {
650	    if (ph)
651		ph->next = h->next;
652	    else
653		heaps = h->next;
654	    fheap = NULL;
655#ifdef USE_MMAP
656	    munmap((void *) h, h->size);
657#else
658	    zfree(h, HEAPSIZE);
659#endif
660	    unqueue_signals();
661	    return NULL;
662	}
663	if (new > ARENA_SIZEOF(h)) {
664	    /*
665	     * Not enough memory in this heap.  Allocate a new
666	     * one of sufficient size.
667	     *
668	     * To avoid this happening too often, allocate
669	     * chunks in multiples of HEAPSIZE.
670	     * (Historical note:  there didn't used to be any
671	     * point in this since we didn't consistently record
672	     * the allocated size of the heap, but now we do.)
673	     */
674	    size_t n = (new + sizeof(*h) + HEAPSIZE);
675	    n -= n % HEAPSIZE;
676	    fheap = NULL;
677
678#ifdef USE_MMAP
679	    {
680		/*
681		 * I don't know any easy portable way of requesting
682		 * a mmap'd segment be extended, so simply allocate
683		 * a new one and copy.
684		 */
685		Heap hnew;
686
687		hnew = mmap_heap_alloc(&n);
688		/* Copy the entire heap, header (with next pointer) included */
689		memcpy(hnew, h, h->size);
690		munmap((void *)h, h->size);
691		h = hnew;
692	    }
693#else
694	    h = (Heap) realloc(h, n);
695#endif
696
697	    h->size = n;
698	    if (ph)
699		ph->next = h;
700	    else
701		heaps = h;
702	}
703	h->used = new;
704#ifdef ZSH_HEAP_DEBUG
705	h->heap_id = heap_id;
706#endif
707	unqueue_signals();
708	return arena(h);
709    }
710#ifndef USE_MMAP
711    DPUTS(h->used > ARENA_SIZEOF(h), "BUG: hrealloc at invalid address");
712#endif
713    if (h->used + (new - old) <= ARENA_SIZEOF(h)) {
714	h->used += new - old;
715	unqueue_signals();
716	return p;
717    } else {
718	char *t = zhalloc(new);
719	memcpy(t, p, old > new ? new : old);
720	h->used -= old;
721#ifdef ZSH_MEM_DEBUG
722	memset(p, 0xff, old);
723#endif
724	unqueue_signals();
725	return t;
726    }
727}
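
/*
 * Illustrative sketch only (not part of zsh): because heap allocations
 * carry no per-chunk header, hrealloc() must be told both the old and
 * the new size.  Passing a new size of 0 releases a specially allocated
 * heap, as described in the comment at the top of the file.  The
 * function name is invented.
 */
#if 0
static void
grow_and_release(void)
{
    size_t len = 64;
    char *buf = (char *) zhalloc(len);

    buf = (char *) hrealloc(buf, len, 2 * len);	/* grow, in place if possible */
    len *= 2;
    /* ... fill and use buf ... */
    hrealloc(buf, len, 0);	/* finished with it */
}
#endif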
728
729/**/
730#ifdef ZSH_HEAP_DEBUG
731/*
732 * Check if heap_id is the identifier of a currently valid heap,
733 * including any heap buried on the stack, or of permanent memory.
734 * Return 0 if so, else 1.
735 *
736 * This gets confused by use of switch_heaps().  That's because so do I.
737 */
738
739/**/
740mod_export int
741memory_validate(Heapid heap_id)
742{
743    Heap h;
744    Heapstack hs;
745    LinkNode node;
746
747    if (heap_id == HEAPID_PERMANENT)
748	return 0;
749
    queue_signals();
    for (h = heaps; h; h = h->next) {
	if (h->heap_id == heap_id) {
	    unqueue_signals();
	    return 0;
	}
	for (hs = h->sp; hs; hs = hs->next) {
	    if (hs->heap_id == heap_id) {
		unqueue_signals();
		return 0;
	    }
	}
    }

    if (heaps_saved) {
	for (node = firstnode(heaps_saved); node; incnode(node)) {
	    for (h = (Heap)getdata(node); h; h = h->next) {
		if (h->heap_id == heap_id) {
		    unqueue_signals();
		    return 0;
		}
		for (hs = h->sp; hs; hs = hs->next) {
		    if (hs->heap_id == heap_id) {
			unqueue_signals();
			return 0;
		    }
		}
	    }
	}
    }

    unqueue_signals();
    return 1;
774}
775/**/
776#endif
777
778/* allocate memory from the current memory pool and clear it */
779
780/**/
781mod_export void *
782hcalloc(size_t size)
783{
784    void *ptr;
785
786    ptr = zhalloc(size);
787    memset(ptr, 0, size);
788    return ptr;
789}
790
791/* allocate permanent memory */
792
793/**/
794mod_export void *
795zalloc(size_t size)
796{
797    void *ptr;
798
799    if (!size)
800	size = 1;
801    queue_signals();
802    if (!(ptr = (void *) malloc(size))) {
803	zerr("fatal error: out of memory");
804	exit(1);
805    }
806    unqueue_signals();
807
808    return ptr;
809}
810
811/**/
812mod_export void *
813zshcalloc(size_t size)
814{
815    void *ptr;
816
817    if (!size)
818	size = 1;
819    queue_signals();
820    if (!(ptr = (void *) malloc(size))) {
821	zerr("fatal error: out of memory");
822	exit(1);
823    }
824    unqueue_signals();
825    memset(ptr, 0, size);
826
827    return ptr;
828}
829
830/* This front-end to realloc is used to make sure we have a realloc *
831 * that conforms to POSIX realloc.  Older realloc's can fail if     *
832 * passed a NULL pointer, but POSIX realloc should handle this.  A  *
833 * better solution would be for configure to check if realloc is    *
834 * POSIX compliant, but I'm not sure how to do that.                */
835
836/**/
837mod_export void *
838zrealloc(void *ptr, size_t size)
839{
840    queue_signals();
841    if (ptr) {
842	if (size) {
843	    /* Do normal realloc */
844	    if (!(ptr = (void *) realloc(ptr, size))) {
845		zerr("fatal error: out of memory");
846		exit(1);
847	    }
848	    unqueue_signals();
849	    return ptr;
850	}
851	else
852	    /* If ptr is not NULL, but size is zero, *
853	     * then object pointed to is freed.      */
854	    free(ptr);
855
856	ptr = NULL;
857    } else {
858	/* If ptr is NULL, then behave like malloc */
859	ptr = malloc(size);
860    }
861    unqueue_signals();
862
863    return ptr;
864}
865
866/**/
867#ifdef ZSH_MEM
868
869/*
870   Below is a simple segment oriented memory allocator for systems on
871   which it is better than the system's one. Memory is given in blocks
872   aligned to an integer multiple of sizeof(union mem_align), which will
873   probably be 64-bit as it is the longer of zlong or double. Each block is
874   preceded by a header which contains the length of the data part (in
   bytes). In allocated blocks only this field of the structure m_hdr is
   meaningful. In free blocks the second field (next) is a pointer to the next
877   free segment on the free list.
878
879   On top of this simple allocator there is a second allocator for small
880   chunks of data. It should be both faster and less space-consuming than
881   using the normal segment mechanism for such blocks.
882   For the first M_NSMALL-1 possible sizes memory is allocated in arrays
883   that can hold M_SNUM blocks. Each array is stored in one segment of the
884   main allocator. In these segments the third field of the header structure
885   (free) contains a pointer to the first free block in the array. The
886   last field (used) gives the number of already used blocks in the array.
887
888   If the macro name ZSH_MEM_DEBUG is defined, some information about the memory
   usage is stored. This information can then be viewed by calling the
890   builtin `mem' (which is only available if ZSH_MEM_DEBUG is set).
891
892   If ZSH_MEM_WARNING is defined, error messages are printed in case of errors.
893
   If ZSH_SECURE_FREE is defined, free() checks if the given address is really
   one that was returned by malloc(); if it wasn't, the call is ignored
   (printing an error message if ZSH_MEM_WARNING is also defined).
897*/
898#if !defined(__hpux) && !defined(DGUX) && !defined(__osf__)
899# if defined(_BSD)
900#  ifndef HAVE_BRK_PROTO
901   extern int brk _((caddr_t));
902#  endif
903#  ifndef HAVE_SBRK_PROTO
904   extern caddr_t sbrk _((int));
905#  endif
906# else
907#  ifndef HAVE_BRK_PROTO
908   extern int brk _((void *));
909#  endif
910#  ifndef HAVE_SBRK_PROTO
911   extern void *sbrk _((int));
912#  endif
913# endif
914#endif
915
916#if defined(_BSD) && !defined(STDC_HEADERS)
917# define FREE_RET_T   int
918# define FREE_ARG_T   char *
919# define FREE_DO_RET
920# define MALLOC_RET_T char *
921# define MALLOC_ARG_T size_t
922#else
923# define FREE_RET_T   void
924# define FREE_ARG_T   void *
925# define MALLOC_RET_T void *
926# define MALLOC_ARG_T size_t
927#endif
928
929/* structure for building free list in blocks holding small blocks */
930
931struct m_shdr {
932    struct m_shdr *next;	/* next one on free list */
933#ifdef PAD_64_BIT
934    /* dummy to make this 64-bit aligned */
935    struct m_shdr *dummy;
936#endif
937};
938
939struct m_hdr {
940    zlong len;			/* length of memory block */
941#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
942    /* either 1 or 2 zlong's, whichever makes up 64 bits. */
943    zlong dummy1;
944#endif
945    struct m_hdr *next;		/* if free: next on free list
946				   if block of small blocks: next one with
947				                 small blocks of same size*/
948    struct m_shdr *free;	/* if block of small blocks: free list */
949    zlong used;			/* if block of small blocks: number of used
950				                                     blocks */
951#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
952    zlong dummy2;
953#endif
954};
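
/*
 * Rough picture of how these headers are used (see the long comment
 * above): in an allocated "big" block only len is meaningful and the
 * user data starts at &m->next; a free block additionally uses next to
 * chain the free list; a block carrying small blocks uses all four
 * fields, followed by an array of M_SNUM equally sized small blocks.
 */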
955
956
957/* alignment for memory blocks */
958
959#define M_ALIGN (sizeof(union mem_align))
960
961/* length of memory header, length of first field of memory header and
962   minimal size of a block left free (if we allocate memory and take a
963   block from the free list that is larger than needed, it must have at
   least M_MIN extra bytes to be split; if it has, the rest is put on
965   the free list) */
966
967#define M_HSIZE (sizeof(struct m_hdr))
968#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
969# define M_ISIZE (2*sizeof(zlong))
970#else
971# define M_ISIZE (sizeof(zlong))
972#endif
973#define M_MIN   (2 * M_ISIZE)
974
975/* M_FREE  is the number of bytes that have to be free before memory is
976 *         given back to the system
977 * M_KEEP  is the number of bytes that will be kept when memory is given
978 *         back; note that this has to be less than M_FREE
979 * M_ALLOC is the number of extra bytes to request from the system */
980
981#define M_FREE  32768
982#define M_KEEP  16384
983#define M_ALLOC M_KEEP
984
985/* a pointer to the last free block, a pointer to the free list (the blocks
986   on this list are kept in order - lowest address first) */
987
988static struct m_hdr *m_lfree, *m_free;
989
990/* system's pagesize */
991
992static long m_pgsz = 0;
993
994/* the highest and the lowest valid memory addresses, kept for fast validity
995   checks in free() and to find out if and when we can give memory back to
996   the system */
997
998static char *m_high, *m_low;
999
1000/* Management of blocks for small blocks:
1001   Such blocks are kept in lists (one list for each of the sizes that are
1002   allocated in such blocks).  The lists are stored in the m_small array.
1003   M_SIDX() calculates the index into this array for a given size.  M_SNUM
1004   is the size (in small blocks) of such blocks.  M_SLEN() calculates the
1005   size of the small blocks held in a memory block, given a pointer to the
1006   header of it.  M_SBLEN() gives the size of a memory block that can hold
1007   an array of small blocks, given the size of these small blocks.  M_BSLEN()
1008   calculates the size of the small blocks held in a memory block, given the
   length of that block (including the header of the memory block).  M_NSMALL
1010   is the number of possible block sizes that small blocks should be used
1011   for. */
1012
1013
1014#define M_SIDX(S)  ((S) / M_ISIZE)
1015#define M_SNUM     128
1016#define M_SLEN(M)  ((M)->len / M_SNUM)
1017#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
1018/* Include the dummy in the alignment */
1019#define M_SBLEN(S) ((S) * M_SNUM + sizeof(struct m_shdr *) +  \
1020		    2*sizeof(zlong) + sizeof(struct m_hdr *))
1021#define M_BSLEN(S) (((S) - sizeof(struct m_shdr *) -  \
1022		     2*sizeof(zlong) - sizeof(struct m_hdr *)) / M_SNUM)
1023#else
1024#define M_SBLEN(S) ((S) * M_SNUM + sizeof(struct m_shdr *) +  \
1025		    sizeof(zlong) + sizeof(struct m_hdr *))
1026#define M_BSLEN(S) (((S) - sizeof(struct m_shdr *) -  \
1027		     sizeof(zlong) - sizeof(struct m_hdr *)) / M_SNUM)
1028#endif
1029#define M_NSMALL    8
1030
1031static struct m_hdr *m_small[M_NSMALL];
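
/*
 * Worked example (assuming M_ISIZE == M_ALIGN == 8, i.e. a typical
 * 64-bit build without PAD_64_BIT): a malloc() of 20 bytes is rounded
 * up to 24, M_SIDX(24) == 3 < M_NSMALL, so it is served from the
 * m_small[3] list, whose carrier blocks are M_SBLEN(24) bytes long and
 * each hold M_SNUM == 128 blocks of 24 bytes.  Rounded requests of
 * 64 bytes or more (M_SIDX >= M_NSMALL) go through the normal
 * free-list path instead.
 */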
1032
1033#ifdef ZSH_MEM_DEBUG
1034
1035static int m_s = 0, m_b = 0;
1036static int m_m[1025], m_f[1025];
1037
1038static struct m_hdr *m_l;
1039
1040#endif /* ZSH_MEM_DEBUG */
1041
1042MALLOC_RET_T
1043malloc(MALLOC_ARG_T size)
1044{
1045    struct m_hdr *m, *mp, *mt;
1046    long n, s, os = 0;
1047#ifndef USE_MMAP
1048    struct heap *h, *hp, *hf = NULL, *hfp = NULL;
1049#endif
1050
1051    /* some systems want malloc to return the highest valid address plus one
1052       if it is called with an argument of zero.
1053
1054       TODO: really?  Suppose we allocate more memory, so
1055       that this is now in bounds, then a more rational application
1056       that thinks it can free() anything it malloc'ed, even
1057       of zero length, calls free for it?  Aren't we in big
1058       trouble?  Wouldn't it be safer just to allocate some
1059       memory anyway?
1060
1061       If the above comment is really correct, then at least
1062       we need to check in free() if we're freeing memory
1063       at m_high.
1064    */
1065
1066    if (!size)
1067#if 1
1068	size = 1;
1069#else
1070	return (MALLOC_RET_T) m_high;
1071#endif
1072
1073    queue_signals();  /* just queue signals rather than handling them */
1074
1075    /* first call, get page size */
1076
1077    if (!m_pgsz) {
1078
1079#ifdef _SC_PAGESIZE
1080	m_pgsz = sysconf(_SC_PAGESIZE);     /* SVR4 */
1081#else
1082# ifdef _SC_PAGE_SIZE
1083	m_pgsz = sysconf(_SC_PAGE_SIZE);    /* HPUX */
1084# else
1085	m_pgsz = getpagesize();
1086# endif
1087#endif
1088
1089	m_free = m_lfree = NULL;
1090    }
1091    size = (size + M_ALIGN - 1) & ~(M_ALIGN - 1);
1092
1093    /* Do we need a small block? */
1094
1095    if ((s = M_SIDX(size)) && s < M_NSMALL) {
1096	/* yep, find a memory block with free small blocks of the
1097	   appropriate size (if we find it in this list, this means that
1098	   it has room for at least one more small block) */
1099	for (mp = NULL, m = m_small[s]; m && !m->free; mp = m, m = m->next);
1100
1101	if (m) {
1102	    /* we found one */
1103	    struct m_shdr *sh = m->free;
1104
1105	    m->free = sh->next;
1106	    m->used++;
1107
1108	    /* if all small blocks in this block are allocated, the block is
	       put at the end of the list of blocks with small blocks of this
1110	       size (i.e., we try to keep blocks with free blocks at the
1111	       beginning of the list, to make the search faster) */
1112
1113	    if (m->used == M_SNUM && m->next) {
1114		for (mt = m; mt->next; mt = mt->next);
1115
1116		mt->next = m;
1117		if (mp)
1118		    mp->next = m->next;
1119		else
1120		    m_small[s] = m->next;
1121		m->next = NULL;
1122	    }
1123#ifdef ZSH_MEM_DEBUG
1124	    m_m[size / M_ISIZE]++;
1125#endif
1126
1127	    unqueue_signals();
1128	    return (MALLOC_RET_T) sh;
1129	}
	/* we still want a small block but there was no block with a free
1131	   small block of the requested size; so we use the real allocation
1132	   routine to allocate a block for small blocks of this size */
1133	os = size;
1134	size = M_SBLEN(size);
1135    } else
1136	s = 0;
1137
    /* search the free list for a block of at least the requested size */
1139    for (mp = NULL, m = m_free; m && m->len < size; mp = m, m = m->next);
1140
1141#ifndef USE_MMAP
1142
1143    /* if there is an empty zsh heap at a lower address we steal it and take
1144       the memory from it, putting the rest on the free list (remember
1145       that the blocks on the free list are ordered) */
1146
1147    for (hp = NULL, h = heaps; h; hp = h, h = h->next)
1148	if (!h->used &&
1149	    (!hf || h < hf) &&
1150	    (!m || ((char *)m) > ((char *)h)))
1151	    hf = h, hfp = hp;
1152
1153    if (hf) {
1154	/* we found such a heap */
1155	Heapstack hso, hsn;
1156
1157	/* delete structures on the list holding the heap states */
1158	for (hso = hf->sp; hso; hso = hsn) {
1159	    hsn = hso->next;
1160	    zfree(hso, sizeof(*hso));
1161	}
1162	/* take it from the list of heaps */
1163	if (hfp)
1164	    hfp->next = hf->next;
1165	else
1166	    heaps = hf->next;
	/* now we simply free it and then search the free list again */
1168	zfree(hf, HEAPSIZE);
1169
1170	for (mp = NULL, m = m_free; m && m->len < size; mp = m, m = m->next);
1171    }
1172#endif
1173    if (!m) {
1174	long nal;
1175	/* no matching free block was found, we have to request new
1176	   memory from the system */
1177	n = (size + M_HSIZE + M_ALLOC + m_pgsz - 1) & ~(m_pgsz - 1);
1178
1179	if (((char *)(m = (struct m_hdr *)sbrk(n))) == ((char *)-1)) {
1180	    DPUTS1(1, "MEM: allocation error at sbrk, size %L.", n);
1181	    unqueue_signals();
1182	    return NULL;
1183	}
1184	if ((nal = ((long)(char *)m) & (M_ALIGN-1))) {
1185	    if ((char *)sbrk(M_ALIGN - nal) == (char *)-1) {
1186		DPUTS(1, "MEM: allocation error at sbrk.");
1187		unqueue_signals();
1188		return NULL;
1189	    }
1190	    m = (struct m_hdr *) ((char *)m + (M_ALIGN - nal));
1191	}
1192	/* set m_low, for the check in free() */
1193	if (!m_low)
1194	    m_low = (char *)m;
1195
1196#ifdef ZSH_MEM_DEBUG
1197	m_s += n;
1198
1199	if (!m_l)
1200	    m_l = m;
1201#endif
1202
1203	/* save new highest address */
1204	m_high = ((char *)m) + n;
1205
1206	/* initialize header */
1207	m->len = n - M_ISIZE;
1208	m->next = NULL;
1209
1210	/* put it on the free list and set m_lfree pointing to it */
1211	if ((mp = m_lfree))
1212	    m_lfree->next = m;
1213	m_lfree = m;
1214    }
1215    if ((n = m->len - size) > M_MIN) {
1216	/* the block we want to use has more than M_MIN bytes plus the
1217	   number of bytes that were requested; we split it in two and
1218	   leave the rest on the free list */
1219	struct m_hdr *mtt = (struct m_hdr *)(((char *)m) + M_ISIZE + size);
1220
1221	mtt->len = n - M_ISIZE;
1222	mtt->next = m->next;
1223
1224	m->len = size;
1225
1226	/* put the rest on the list */
1227	if (m_lfree == m)
1228	    m_lfree = mtt;
1229
1230	if (mp)
1231	    mp->next = mtt;
1232	else
1233	    m_free = mtt;
1234    } else if (mp) {
1235	/* the block we found wasn't the first one on the free list */
1236	if (m == m_lfree)
1237	    m_lfree = mp;
1238	mp->next = m->next;
1239    } else {
1240	/* it was the first one */
1241	m_free = m->next;
1242	if (m == m_lfree)
1243	    m_lfree = m_free;
1244    }
1245
1246    if (s) {
1247	/* we are allocating a block that should hold small blocks */
1248	struct m_shdr *sh, *shn;
1249
	/* build the free list in this block and set the `used' field */
1251	m->free = sh = (struct m_shdr *)(((char *)m) +
1252					 sizeof(struct m_hdr) + os);
1253
1254	for (n = M_SNUM - 2; n--; sh = shn)
1255	    shn = sh->next = sh + s;
1256	sh->next = NULL;
1257
1258	m->used = 1;
1259
	/* put the block on the list of blocks holding small blocks of
1261	   this size */
1262	m->next = m_small[s];
1263	m_small[s] = m;
1264
1265#ifdef ZSH_MEM_DEBUG
1266	m_m[os / M_ISIZE]++;
1267#endif
1268
1269	unqueue_signals();
1270	return (MALLOC_RET_T) (((char *)m) + sizeof(struct m_hdr));
1271    }
1272#ifdef ZSH_MEM_DEBUG
1273    m_m[m->len < (1024 * M_ISIZE) ? (m->len / M_ISIZE) : 1024]++;
1274#endif
1275
1276    unqueue_signals();
1277    return (MALLOC_RET_T) & m->next;
1278}
1279
/* this is an internal free(); the second argument may, but need not,
   hold the size of the block the first argument points to; if it is the
   correct size of that block, freeing will be faster; the value
   0 for this parameter means: `don't know' */
1284
1285/**/
1286mod_export void
1287zfree(void *p, int sz)
1288{
1289    struct m_hdr *m = (struct m_hdr *)(((char *)p) - M_ISIZE), *mp, *mt = NULL;
1290    int i;
1291# ifdef DEBUG
1292    int osz = sz;
1293# endif
1294
1295#ifdef ZSH_SECURE_FREE
1296    sz = 0;
1297#else
1298    sz = (sz + M_ALIGN - 1) & ~(M_ALIGN - 1);
1299#endif
1300
1301    if (!p)
1302	return;
1303
1304    /* first a simple check if the given address is valid */
1305    if (((char *)p) < m_low || ((char *)p) > m_high ||
1306	((long)p) & (M_ALIGN - 1)) {
1307	DPUTS(1, "BUG: attempt to free storage at invalid address");
1308	return;
1309    }
1310
1311    queue_signals();
1312
1313  fr_rec:
1314
1315    if ((i = sz / M_ISIZE) < M_NSMALL || !sz)
	/* if the given size says that it is a small block, find the
	   memory block holding it; we search all blocks with blocks
	   of at least the given size; if the size parameter is zero,
	   this means that all blocks are searched */
1320	for (; i < M_NSMALL; i++) {
1321	    for (mp = NULL, mt = m_small[i];
1322		 mt && (((char *)mt) > ((char *)p) ||
1323			(((char *)mt) + mt->len) < ((char *)p));
1324		 mp = mt, mt = mt->next);
1325
1326	    if (mt) {
1327		/* we found the block holding the small block */
1328		struct m_shdr *sh = (struct m_shdr *)p;
1329
1330#ifdef ZSH_SECURE_FREE
1331		struct m_shdr *sh2;
1332
1333		/* check if the given address is equal to the address of
1334		   the first small block plus an integer multiple of the
1335		   block size */
1336		if ((((char *)p) - (((char *)mt) + sizeof(struct m_hdr))) %
1337		    M_BSLEN(mt->len)) {
1338
1339		    DPUTS(1, "BUG: attempt to free storage at invalid address");
1340		    unqueue_signals();
1341		    return;
1342		}
		/* check if the address is on the (block-internal) free list */
1344		for (sh2 = mt->free; sh2; sh2 = sh2->next)
1345		    if (((char *)p) == ((char *)sh2)) {
1346
1347			DPUTS(1, "BUG: attempt to free already free storage");
1348			unqueue_signals();
1349			return;
1350		    }
1351#endif
1352		DPUTS(M_BSLEN(mt->len) < osz,
1353		      "BUG: attempt to free more than allocated.");
1354
1355#ifdef ZSH_MEM_DEBUG
1356		m_f[M_BSLEN(mt->len) / M_ISIZE]++;
1357		memset(sh, 0xff, M_BSLEN(mt->len));
1358#endif
1359
1360		/* put the block onto the free list */
1361		sh->next = mt->free;
1362		mt->free = sh;
1363
1364		if (--mt->used) {
		    /* if there are still used blocks in this block, we
		       put it at the beginning of the list of blocks
		       holding small blocks of the same size (since we
		       know that there is at least one free block in it,
		       this will make allocation of small blocks faster;
		       it also guarantees that long-lived memory blocks
		       are preferred over younger ones) */
1372		    if (mp) {
1373			mp->next = mt->next;
1374			mt->next = m_small[i];
1375			m_small[i] = mt;
1376		    }
1377		    unqueue_signals();
1378		    return;
1379		}
1380		/* if there are no more used small blocks in this
1381		   block, we free the whole block */
1382		if (mp)
1383		    mp->next = mt->next;
1384		else
1385		    m_small[i] = mt->next;
1386
1387		m = mt;
1388		p = (void *) & m->next;
1389
1390		break;
1391	    } else if (sz) {
1392		/* if we didn't find a block and a size was given, try it
1393		   again as if no size were given */
1394		sz = 0;
1395		goto fr_rec;
1396	    }
1397	}
1398#ifdef ZSH_MEM_DEBUG
1399    if (!mt)
1400	m_f[m->len < (1024 * M_ISIZE) ? (m->len / M_ISIZE) : 1024]++;
1401#endif
1402
1403#ifdef ZSH_SECURE_FREE
1404    /* search all memory blocks, if one of them is at the given address */
1405    for (mt = (struct m_hdr *)m_low;
1406	 ((char *)mt) < m_high;
1407	 mt = (struct m_hdr *)(((char *)mt) + M_ISIZE + mt->len))
1408	if (((char *)p) == ((char *)&mt->next))
1409	    break;
1410
1411    /* no block was found at the given address */
1412    if (((char *)mt) >= m_high) {
1413	DPUTS(1, "BUG: attempt to free storage at invalid address");
1414	unqueue_signals();
1415	return;
1416    }
1417#endif
1418
1419    /* see if the block is on the free list */
1420    for (mp = NULL, mt = m_free; mt && mt < m; mp = mt, mt = mt->next);
1421
1422    if (m == mt) {
1423	/* it is, ouch! */
1424	DPUTS(1, "BUG: attempt to free already free storage");
1425	unqueue_signals();
1426	return;
1427    }
1428    DPUTS(m->len < osz, "BUG: attempt to free more than allocated");
1429#ifdef ZSH_MEM_DEBUG
1430    memset(p, 0xff, m->len);
1431#endif
1432    if (mt && ((char *)mt) == (((char *)m) + M_ISIZE + m->len)) {
1433	/* the block after the one we are freeing is free, we put them
1434	   together */
1435	m->len += mt->len + M_ISIZE;
1436	m->next = mt->next;
1437
1438	if (mt == m_lfree)
1439	    m_lfree = m;
1440    } else
1441	m->next = mt;
1442
1443    if (mp && ((char *)m) == (((char *)mp) + M_ISIZE + mp->len)) {
1444	/* the block before the one we are freeing is free, we put them
1445	   together */
1446	mp->len += m->len + M_ISIZE;
1447	mp->next = m->next;
1448
1449	if (m == m_lfree)
1450	    m_lfree = mp;
1451    } else if (mp)
1452	/* otherwise, we just put it on the free list */
1453	mp->next = m;
1454    else {
1455	m_free = m;
1456	if (!m_lfree)
1457	    m_lfree = m_free;
1458    }
1459
1460    /* if the block we have just freed was at the end of the process heap
1461       and now there is more than one page size of memory, we can give
1462       it back to the system (and we do it ;-) */
1463    if ((((char *)m_lfree) + M_ISIZE + m_lfree->len) == m_high &&
1464	m_lfree->len >= m_pgsz + M_MIN + M_FREE) {
1465	long n = (m_lfree->len - M_MIN - M_KEEP) & ~(m_pgsz - 1);
1466
1467	m_lfree->len -= n;
1468#ifdef HAVE_BRK
1469	if (brk(m_high -= n) == -1) {
1470#else
1471	m_high -= n;
1472	if (sbrk(-n) == (void *)-1) {
1473#endif /* HAVE_BRK */
1474	    DPUTS(1, "MEM: allocation error at brk.");
1475	}
1476
1477#ifdef ZSH_MEM_DEBUG
1478	m_b += n;
1479#endif
1480    }
1481    unqueue_signals();
1482}
1483
1484FREE_RET_T
1485free(FREE_ARG_T p)
1486{
1487    zfree(p, 0);		/* 0 means: size is unknown */
1488
1489#ifdef FREE_DO_RET
1490    return 0;
1491#endif
1492}
1493
1494/* this one is for strings (and only strings, real strings, real C strings,
1495   those that have a zero byte at the end) */
1496
1497/**/
1498mod_export void
1499zsfree(char *p)
1500{
1501    if (p)
1502	zfree(p, strlen(p) + 1);
1503}
1504
1505MALLOC_RET_T
1506realloc(MALLOC_RET_T p, MALLOC_ARG_T size)
1507{
1508    struct m_hdr *m = (struct m_hdr *)(((char *)p) - M_ISIZE), *mp, *mt;
1509    char *r;
1510    int i, l = 0;
1511
    /* some systems..., see above */
1513    if (!p && size)
1514	return (MALLOC_RET_T) malloc(size);
1515    /* and some systems even do this... */
1516    if (!p || !size)
1517	return (MALLOC_RET_T) p;
1518
1519    queue_signals();  /* just queue signals caught rather than handling them */
1520
    /* check if we are reallocating a small block; if so, we have
       to compute the size of the block from the sort of block it is in */
1523    for (i = 0; i < M_NSMALL; i++) {
1524	for (mp = NULL, mt = m_small[i];
1525	     mt && (((char *)mt) > ((char *)p) ||
1526		    (((char *)mt) + mt->len) < ((char *)p));
1527	     mp = mt, mt = mt->next);
1528
1529	if (mt) {
1530	    l = M_BSLEN(mt->len);
1531	    break;
1532	}
1533    }
1534    if (!l)
1535	/* otherwise the size of the block is in the memory just before
1536	   the given address */
1537	l = m->len;
1538
1539    /* now allocate the new block, copy the old contents, and free the
1540       old block */
1541    r = malloc(size);
1542    memcpy(r, (char *)p, (size > l) ? l : size);
1543    free(p);
1544
1545    unqueue_signals();
1546    return (MALLOC_RET_T) r;
1547}
1548
1549MALLOC_RET_T
1550calloc(MALLOC_ARG_T n, MALLOC_ARG_T size)
1551{
1552    long l;
1553    char *r;
1554
1555    if (!(l = n * size))
1556	return (MALLOC_RET_T) m_high;
1557
1558    r = malloc(l);
1559
1560    memset(r, 0, l);
1561
1562    return (MALLOC_RET_T) r;
1563}
1564
1565#ifdef ZSH_MEM_DEBUG
1566
1567/**/
1568int
1569bin_mem(char *name, char **argv, Options ops, int func)
1570{
1571    int i, ii, fi, ui, j;
1572    struct m_hdr *m, *mf, *ms;
1573    char *b, *c, buf[40];
1574    long u = 0, f = 0, to, cu;
1575
1576    queue_signals();
1577    if (OPT_ISSET(ops,'v')) {
1578	printf("The lower and the upper addresses of the heap. Diff gives\n");
1579	printf("the difference between them, i.e. the size of the heap.\n\n");
1580    }
1581    printf("low mem %ld\t high mem %ld\t diff %ld\n",
1582	   (long)m_l, (long)m_high, (long)(m_high - ((char *)m_l)));
1583
1584    if (OPT_ISSET(ops,'v')) {
1585	printf("\nThe number of bytes that were allocated using sbrk() and\n");
1586	printf("the number of bytes that were given back to the system\n");
1587	printf("via brk().\n");
1588    }
1589    printf("\nsbrk %d\tbrk %d\n", m_s, m_b);
1590
1591    if (OPT_ISSET(ops,'v')) {
1592	printf("\nInformation about the sizes that were allocated or freed.\n");
	printf("For each size that was used the number of mallocs and\n");
	printf("frees is shown. Diff gives the difference between these\n");
	printf("values, i.e. the number of blocks of that size that are\n");
1596	printf("currently allocated. Total is the product of size and diff,\n");
1597	printf("i.e. the number of bytes that are allocated for blocks of\n");
1598	printf("this size. The last field gives the accumulated number of\n");
1599	printf("bytes for all sizes.\n");
1600    }
1601    printf("\nsize\tmalloc\tfree\tdiff\ttotal\tcum\n");
1602    for (i = 0, cu = 0; i < 1024; i++)
1603	if (m_m[i] || m_f[i]) {
1604	    to = (long) i * M_ISIZE * (m_m[i] - m_f[i]);
1605	    printf("%ld\t%d\t%d\t%d\t%ld\t%ld\n",
1606		   (long)i * M_ISIZE, m_m[i], m_f[i], m_m[i] - m_f[i],
1607		   to, (cu += to));
1608	}
1609
1610    if (m_m[i] || m_f[i])
1611	printf("big\t%d\t%d\t%d\n", m_m[i], m_f[i], m_m[i] - m_f[i]);
1612
1613    if (OPT_ISSET(ops,'v')) {
1614	printf("\nThe list of memory blocks. For each block the following\n");
1615	printf("information is shown:\n\n");
1616	printf("num\tthe number of this block\n");
1617	printf("tnum\tlike num but counted separately for used and free\n");
1618	printf("\tblocks\n");
1619	printf("addr\tthe address of this block\n");
1620	printf("len\tthe length of the block\n");
1621	printf("state\tthe state of this block, this can be:\n");
1622	printf("\t  used\tthis block is used for one big block\n");
1623	printf("\t  free\tthis block is free\n");
1624	printf("\t  small\tthis block is used for an array of small blocks\n");
1625	printf("cum\tthe accumulated sizes of the blocks, counted\n");
1626	printf("\tseparately for used and free blocks\n");
1627	printf("\nFor blocks holding small blocks the number of free\n");
1628	printf("blocks, the number of used blocks and the size of the\n");
1629	printf("blocks is shown. For otherwise used blocks the first few\n");
1630	printf("bytes are shown as an ASCII dump.\n");
1631    }
1632    printf("\nblock list:\nnum\ttnum\taddr\t\tlen\tstate\tcum\n");
1633    for (m = m_l, mf = m_free, ii = fi = ui = 1; ((char *)m) < m_high;
1634	 m = (struct m_hdr *)(((char *)m) + M_ISIZE + m->len), ii++) {
1635	for (j = 0, ms = NULL; j < M_NSMALL && !ms; j++)
1636	    for (ms = m_small[j]; ms; ms = ms->next)
1637		if (ms == m)
1638		    break;
1639
1640	if (m == mf)
1641	    buf[0] = '\0';
1642	else if (m == ms)
1643	    sprintf(buf, "%ld %ld %ld", (long)(M_SNUM - ms->used),
1644		    (long)ms->used,
1645		    (long)(m->len - sizeof(struct m_hdr)) / M_SNUM + 1);
1646
1647	else {
1648	    for (i = 0, b = buf, c = (char *)&m->next; i < 20 && i < m->len;
1649		 i++, c++)
1650		*b++ = (*c >= ' ' && *c < 127) ? *c : '.';
1651	    *b = '\0';
1652	}
1653
1654	printf("%d\t%d\t%ld\t%ld\t%s\t%ld\t%s\n", ii,
1655	       (m == mf) ? fi++ : ui++,
1656	       (long)m, (long)m->len,
1657	       (m == mf) ? "free" : ((m == ms) ? "small" : "used"),
1658	       (m == mf) ? (f += m->len) : (u += m->len),
1659	       buf);
1660
1661	if (m == mf)
1662	    mf = mf->next;
1663    }
1664
1665    if (OPT_ISSET(ops,'v')) {
1666	printf("\nHere is some information about the small blocks used.\n");
1667	printf("For each size the arrays with the number of free and the\n");
1668	printf("number of used blocks are shown.\n");
1669    }
1670    printf("\nsmall blocks:\nsize\tblocks (free/used)\n");
1671
1672    for (i = 0; i < M_NSMALL; i++)
1673	if (m_small[i]) {
1674	    printf("%ld\t", (long)i * M_ISIZE);
1675
1676	    for (ii = 0, m = m_small[i]; m; m = m->next) {
1677		printf("(%ld/%ld) ", (long)(M_SNUM - m->used),
1678		       (long)m->used);
1679		if (!((++ii) & 7))
1680		    printf("\n\t");
1681	    }
1682	    putchar('\n');
1683	}
1684    if (OPT_ISSET(ops,'v')) {
1685	printf("\n\nBelow is some information about the allocation\n");
1686	printf("behaviour of the zsh heaps. First the number of times\n");
1687	printf("pushheap(), popheap(), and freeheap() were called.\n");
1688    }
1689    printf("\nzsh heaps:\n\n");
1690
1691    printf("push %d\tpop %d\tfree %d\n\n", h_push, h_pop, h_free);
1692
1693    if (OPT_ISSET(ops,'v')) {
1694	printf("\nThe next list shows for several sizes the number of times\n");
	printf("memory of this size was taken from the heaps.\n\n");
1696    }
1697    printf("size\tmalloc\ttotal\n");
1698    for (i = 0; i < 1024; i++)
1699	if (h_m[i])
1700	    printf("%ld\t%d\t%ld\n", (long)i * H_ISIZE, h_m[i],
1701		   (long)i * H_ISIZE * h_m[i]);
1702    if (h_m[1024])
1703	printf("big\t%d\n", h_m[1024]);
1704
1705    unqueue_signals();
1706    return 0;
1707}
1708
1709#endif
1710
1711/**/
1712#else				/* not ZSH_MEM */
1713
1714/**/
1715mod_export void
1716zfree(void *p, UNUSED(int sz))
1717{
1718    if (p)
1719	free(p);
1720}
1721
1722/**/
1723mod_export void
1724zsfree(char *p)
1725{
1726    if (p)
1727	free(p);
1728}
1729
1730/**/
1731#endif
1732