/* BEGIN LICENSE BLOCK
 * Version: CMPL 1.1
 *
 * The contents of this file are subject to the Cisco-style Mozilla Public
 * License Version 1.1 (the "License"); you may not use this file except
 * in compliance with the License.  You may obtain a copy of the License
 * at www.eclipse-clp.org/license.
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.  See
 * the License for the specific language governing rights and limitations
 * under the License.
 *
 * The Original Code is  The ECLiPSe Constraint Logic Programming System.
 * The Initial Developer of the Original Code is  Cisco Systems, Inc.
 * Portions created by the Initial Developer are
 * Copyright (C) 1992-2006 Cisco Systems, Inc.  All Rights Reserved.
 *
 * Contributor(s): Joachim Schimpf, ECRC.
 *
 * END LICENSE BLOCK */

/*
* IDENTIFICATION	alloc.c
*
* VERSION		$Id: alloc.c,v 1.3 2012/10/20 13:16:02 jschimpf Exp $
*
* AUTHOR		Joachim Schimpf
*
* DESCRIPTION		heap allocator
*
* CONTENTS
*
*	        PAGE LEVEL
*				pagemanager_init()
*				alloc_pagewise()
*				alloc_page()
*				free_pages()
*		BLOCK LEVEL
*				alloc_init()
*				alloc_statistics()
*
*				alloc_size(heap, bytes)
*				free_size(heap, ptr, bytes)
*				realloc_size(heap, ptr, oldbytes, newbytes)
*
*				h_alloc(heap, bytes)
*				h_free(heap, ptr)
*				h_realloc(heap, ptr, newbytes)
*/
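/*
 * Usage sketch (illustrative only; it assumes a heap_descriptor hd whose
 * more/less/panic hooks have already been set up by the caller, as done
 * elsewhere in the memory manager):
 *
 *	pagemanager_init(&hd);
 *	alloc_init(&hd);
 *	p = h_alloc(&hd, 100);		-- size is remembered in a header
 *	p = h_realloc(&hd, p, 200);	-- may move the block
 *	h_free(&hd, p);			-- no size argument needed
 */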

#include	"config.h"
#include	"memman.h"

#ifdef HAVE_STRING_H
#  include <string.h>
#  ifdef MEMCPY_STRING
#    define bcopy(s1, s2, n)	(void) memcpy((void *)(s2),(void *)(s1), n)
#  endif
#endif
#ifdef MEMCPY_MEMORY
#  include <memory.h>
#  define bcopy(s1, s2, n)	(void) memcpy((char *)(s2), (char *)(s1), n)
#endif
#ifdef HAVE_UNISTD_H
#  include <unistd.h>	/* for write() in _print() below */
#endif

#define OUT_OF_HEAP	((generic_ptr)(-1))


/* DEBUG_HEAP works only when linked with sepia! */

#ifdef DEBUG_HEAP
#include <stdio.h>
typedef void *          stream_id;
extern  stream_id       current_err_;
void	pr_heap(struct heap_descriptor *hd);
#endif

extern void	exit(int);

#define CHECK_HEAP

#ifdef CHECK_HEAP
static void
_print(char *msg)
{
    (void) write(2, msg, strlen(msg));
}
#endif


/*---------------------------------------------------------------------
 * Interrupt Locks
 *---------------------------------------------------------------------*/

#define Lock_Heap(hd) { \
    if (hd->shared_header) { a_mutex_lock(&hd->shared_header->lock); } \
    else { Disable_Int(); }}

#define Unlock_Heap(hd) { \
    if (hd->shared_header) { a_mutex_unlock(&hd->shared_header->lock); } \
    else { Enable_Int(); }}
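/*
 * A heap is either shared between processes (shared_header != 0), in
 * which case it is protected by a mutex living in the shared memory
 * itself, or private, in which case it suffices to defer interrupts:
 * Disable_Int()/Enable_Int() (presumably defined in memman.h) maintain
 * the nesting counter it_disabled_ below; an interrupt arriving while
 * the heap is locked only sets delayed_it_, and delayed_irq_func is
 * then invoked once the nesting count drops back to zero.
 */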

void (*delayed_irq_func)(void) = 0;	/* to process delayed interrupts	*/

volatile int it_disabled_ = 0;		/* number of nested disables */
volatile int delayed_it_ = 0;		/* flags that something is in the queue */


void
irq_lock_init(void (*irq_func)(void))
{
    it_disabled_ = delayed_it_ = 0;
    delayed_irq_func = irq_func;
}


/*---------------------------------------------------------------------
 * Low-level pagewise memory management
 *
 * The allocation functions call panic() when there is no memory.
 * The pagewise allocation functions do not disable interrupts;
 * this must be done by the caller.
 *
 * Management of free pages:
 *
 * We maintain a bitmap pages->map[] of all memory pages.
 * The bit is set when the corresponding page is free.
 *
 * Additionally, we have free lists pages->free[] for page clusters.
 * pages->free[i] holds a list of i-page cluster descriptors.
 * pages->free[0] holds a list of clusters of size PAGE_LISTS or larger.
 *
 * Allocation of an i-page cluster:
 *	- check the free list for i-page clusters
 *	- if this is empty, split a larger cluster
 *	- if no sufficiently large cluster is available, call sbrk()
 *	- reset the free-bits in the bitmap
 * Freeing a cluster:
 *	- set the free-bits in the bitmap
 *	- join with adjacent clusters, if possible
 *	- put the resulting free cluster into the appropriate free list
 *
 * A page in this context is just a unit of BYTES_PER_PAGE bytes.
 * It is not strictly necessary that it corresponds to the
 * system_page_size.
 * TODO: Especially for shared memory, the free cluster links should
 * not be kept in the pages themselves, as that causes unnecessary
 * page accesses.
 *---------------------------------------------------------------------*/
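/*
 * Coalescing example (illustrative): suppose pages 5-6 and 9 are free
 * and pages 7-8 are now being freed.  free_pages() removes the 2-page
 * cluster at page 5 and the 1-page cluster at page 9 from their free
 * lists, sets the free-bits for pages 7 and 8, and enters a single
 * 5-page cluster starting at page 5 into pages->free[5].
 */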


#define RoundTo(n,unit) ((n) - ((n) - 1) % (unit) - 1 + (unit))
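/*
 * RoundTo() rounds n up to the next multiple of unit, e.g. with a page
 * size of 4096: RoundTo(1,4096) == 4096, RoundTo(4096,4096) == 4096,
 * RoundTo(4097,4096) == 8192.  This form avoids the intermediate
 * overflow that the usual ((n)+(unit)-1) variant can suffer when n is
 * close to the top of the address range.
 */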

#if (SIZEOF_CHAR_P == 4)
#define USE_BITMAPS
#endif

#ifdef USE_BITMAPS

/* Define bitmasks and their widths for memory parameters. */
#define BITS_IN_WORD_M		0x1f
#define BITS_IN_WORD_W		5
#define WORDS_IN_BLOCK_M	0x3ff
#define WORDS_IN_BLOCK_W	10

#define MapBlock(i)	((i) >> (BITS_IN_WORD_W + WORDS_IN_BLOCK_W))
#define MapWord(i)	(((i) >> BITS_IN_WORD_W) & WORDS_IN_BLOCK_M)
#define MapBit(i)	(0x1L << ((i) & BITS_IN_WORD_M))

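/*
 * A page address is first reduced to a page index i = addr/BYTES_PER_PAGE.
 * The index is then split into three fields: the low 5 bits select a bit
 * within a 32-bit map word, the next 10 bits select a word within a
 * bitmap block, and the remaining high bits select the block in
 * pages->map[].  With 4-byte map words a block is thus 4096 bytes, i.e.
 * one auxiliary page (assuming BYTES_PER_PAGE == 4096).
 */
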
/* mask must be unsigned */
#define Page_Parameters(PageAddr, Block, Ptr, Mask) {		\
	register bits32 i = (bits32)(PageAddr) / BYTES_PER_PAGE;\
	Block = MapBlock(i);					\
	if (! pages->map[Block])				\
	    pages->map[Block] = _new_bitmap_block(hd);		\
	Ptr = &(pages->map[Block][MapWord(i)]);			\
	Mask = MapBit(i);					\
}

#define Next_Bit(block, ptr, mask) {			\
	if (((mask) <<= 1) == 0)			\
	{						\
	    (mask) = 0x1;				\
	    if ((word)(++(ptr)) % BITMAP_BLOCKSIZE == 0)	\
	    {						\
		++block;				\
		if (! pages->map[block])		\
		    pages->map[block] = _new_bitmap_block(hd);\
		ptr = pages->map[block];		\
	    }						\
	}						\
}

#define Prev_Bit(block, ptr, mask) {			\
	if (((mask) >>= 1) == 0)			\
	{						\
	    (mask) = SIGN_BIT;				\
	    if ((word)((ptr)--) % BITMAP_BLOCKSIZE == 0)	\
	    {						\
		--block;				\
		if (! pages->map[block])		\
		    pages->map[block] = _new_bitmap_block(hd);\
		ptr = pages->map[block] - 1 +		\
		    BITMAP_BLOCKSIZE/sizeof(bits32);	\
	    }						\
	}						\
}

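/*
 * Next_Bit/Prev_Bit advance a (block, word pointer, bit mask) triple by
 * one page position.  The modulo test on the word pointer relies on
 * bitmap blocks being whole, page-aligned auxiliary pages, so crossing
 * a block boundary can be detected from the pointer value alone.
 */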
#endif

/*ARGSUSED*/
void
pagemanager_init(struct heap_descriptor *hd)
{
    register int i;
    struct page_admin *pages = hd->pages;
    pages->min_addr = pages->max_addr = 0;
    for (i=0; i<BITMAP_BLOCKS; i++)
	pages->map[i] = (bits32 *) 0;
    for (i=0; i<PAGE_LISTS; i++)
	pages->free[i] = (struct cluster *) 0;
    pages->allocated = 0;
    pages->freed = 0;
    pages->log_page = (struct page_log *) 0;
    pages->log_idx = 0;
}


static generic_ptr
_alloc_aux_page(struct heap_descriptor *hd)
{
    generic_ptr address;
    address = hd->more(BYTES_PER_PAGE, BYTES_PER_PAGE, hd);
    if (address == OUT_OF_HEAP)
    {
	_print("SHM: out of memory in _alloc_aux_page()\n");
	exit(-1);	/* cannot recover from here ... */
    }
    ++hd->pages->allocated;
    return address;
}

static void
_release_aux_page(struct heap_descriptor *hd, generic_ptr address)
{
    (void) hd->less(address, BYTES_PER_PAGE, hd);
    --hd->pages->allocated;
}

static void _release_logged_pages(struct heap_descriptor *hd);

void
pagemanager_fini(struct heap_descriptor *hd)
{
    int i;
    _release_logged_pages(hd);
    for (i=0; i<BITMAP_BLOCKS; i++)
    {
	if (hd->pages->map[i])
	{
	    _release_aux_page(hd, hd->pages->map[i]);
	    hd->pages->map[i] = 0;
	}
    }
    if (hd->pages->allocated)
    {
	_print("SHM: not all pages were freed in pagemanager_fini()\n");
    }
}


#ifdef USE_BITMAPS
static bits32 *
_new_bitmap_block(struct heap_descriptor *hd)
{
    register int i;		/* careful: bootstrapping problem! */
    bits32 *p = (bits32 *)_alloc_aux_page(hd);
    for (i=0; i < WORDS_PER_PAGE; i++)
	p[i] = 0;
    return p;
}
#endif

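/*
 * Free-list handling.  The cluster descriptor (next link, address and
 * size) is stored in the first words of the free cluster itself -- the
 * TODO above refers to exactly this.  Clusters of fewer than PAGE_LISTS
 * pages go into the list matching their size; larger ones go into
 * free[0].
 */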
static void
_add_to_list(struct heap_descriptor *hd,
	generic_ptr ptr,
	word number_of_pages)	/* should be > 0 */
{
    int list_index = number_of_pages < PAGE_LISTS ? number_of_pages : 0;
    ((struct cluster *)ptr)->next = hd->pages->free[list_index];
    ((struct cluster *)ptr)->addr = ptr;
    ((struct cluster *)ptr)->size = number_of_pages;
    hd->pages->free[list_index] = (struct cluster *)ptr;
}

static void
_remove_from_list(struct heap_descriptor *hd,
	generic_ptr ptr,
	word	number_of_pages)	/* should be > 0 */
{
    register struct cluster **p;
    p = &hd->pages->free[number_of_pages < PAGE_LISTS ? number_of_pages : 0];
    while ((*p) && (*p)->addr != ptr)
    {
	p = &((*p)->next);
    }
#ifdef CHECK_HEAP
    if (*p == 0)
    {
	_print("SHM INTERNAL ERROR: pagecluster missing from free list\n");
	return;
    }
#endif
    *p = (*p)->next;
}

#ifdef CHECK_HEAP
void
_check_address(
	struct heap_descriptor *hd,
	generic_ptr ptr,
	uword size)
{
    if (ptr < hd->pages->min_addr ||
    	((generic_ptr)((char*) ptr + size) > hd->pages->max_addr && hd->pages->max_addr))
    {
	_print("SHM: attempt to free out-of-heap pointer!\n");
    }
}
#endif

#ifdef USE_BITMAPS

void
free_pages(
	struct heap_descriptor *hd,
	generic_ptr ptr,
	word	number_of_pages)	/* should be > 0 */
{
    int	block;
    register bits32 *p, mask;
    char *from, *to;
    struct page_admin *pages = hd->pages;

#if (DEBUG_HEAP > 1)
    fprintf(stderr, "free %ld pages\n", (long) number_of_pages);
#endif

#ifdef CHECK_HEAP
    if ((word) ptr % BYTES_PER_PAGE != 0)
    {
	_print("SHM: misaligned pointer in free_pages()\n");
	return;
    }
    _check_address(hd, ptr, number_of_pages*BYTES_PER_PAGE);
#endif

    pages->freed += number_of_pages;
    Page_Parameters(ptr, block, p, mask);
    from = (char *) ptr;
    Prev_Bit(block, p, mask);
    if (*p & mask)			/* adjacent to lower neighbour */
    {
	do {
	    Prev_Bit(block, p, mask);
	    from -= BYTES_PER_PAGE;
	} while (*p & mask);
	Page_Parameters(ptr, block, p, mask);
	Prev_Bit(block, p, mask);
	_remove_from_list(hd, (generic_ptr) from, ((char*)ptr-from)/BYTES_PER_PAGE);
    }
    while (number_of_pages--)		/* update the bitmap */
    {
	Next_Bit(block, p, mask);
	ptr = (generic_ptr) ((char *) ptr + BYTES_PER_PAGE);
#ifdef CHECK_HEAP
	if (mask & *p)
	{
	    _print("SHM: page is already free in free_pages()\n");
	}
#endif
	*p |= mask;
    }
    to = (char *) ptr;
    Next_Bit(block, p, mask);
    if (*p & mask)			/* adjacent to upper neighbour */
    {
	do {
	    Next_Bit(block, p, mask);
	    to += BYTES_PER_PAGE;
	} while (*p & mask);
	_remove_from_list(hd, ptr, (to-(char*)ptr)/BYTES_PER_PAGE);
    }
    _add_to_list(hd, (generic_ptr) from, (to-from)/BYTES_PER_PAGE);
}

#else

#define GenericAdd(p,off) ((generic_ptr)((char*)(p)+(off)))

void
free_pages(
	struct heap_descriptor *hd,
	generic_ptr ptr,
	word	number_of_pages)	/* should be > 0 */
{
    struct cluster *clu, *next;
    generic_ptr to;
    int i;
    struct page_admin *pages = hd->pages;

#if (DEBUG_HEAP > 1)
    fprintf(stderr, "free %ld pages\n", (long) number_of_pages);
#endif

#ifdef CHECK_HEAP
    if ((word) ptr % BYTES_PER_PAGE != 0)
    {
	_print("SHM: misaligned pointer in free_pages()\n");
	return;
    }
    _check_address(hd, ptr, number_of_pages*BYTES_PER_PAGE);
#endif

    pages->freed += number_of_pages;
    to = GenericAdd(ptr, number_of_pages*BYTES_PER_PAGE);
    for (i=0; i<PAGE_LISTS; ++i)
    {
	for (clu = hd->pages->free[i]; clu; clu = next)
	{
	    next = clu->next;	/* memorize because clu may be unlinked */
	    if (clu->addr > to
		    || GenericAdd(clu->addr, clu->size*BYTES_PER_PAGE) < ptr)
		continue;
	    if (clu->addr == to)
	    {
		/* adjacent to upper neighbour */
		_remove_from_list(hd, clu->addr, clu->size);
		number_of_pages += clu->size;
		/* ptr unchanged */
		to = GenericAdd(ptr, number_of_pages*BYTES_PER_PAGE);
	    }
	    else if (GenericAdd(clu->addr, clu->size*BYTES_PER_PAGE) == ptr)
	    {
		/* adjacent to lower neighbour */
		_remove_from_list(hd, clu->addr, clu->size);
		number_of_pages += clu->size;
		ptr = clu->addr;
		/* to unchanged */
	    }
	    else	/* overlap, shouldn't happen */
	    {
		_print("SHM: page is already free in free_pages()\n");
		return;
	    }
	}
    }
    _add_to_list(hd, ptr, number_of_pages);
}

#endif


/*
 * We additionally keep a log of all the more() requests made to the OS.
 * It is used to forcibly release all heap space (allocated or not)
 * when the heap is finalised. We use auxiliary pages for this log.
 */
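/*
 * Layout of a log page (BYTES_PER_PAGE/sizeof(struct page_log) entries):
 * entry 0 is reserved as a link to the previous log page (npages == 0),
 * and entries 1..log_idx hold (address, npages) records.  A request that
 * is adjacent to the previously logged one is merged into that record.
 */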

static void
_log_more_pages(struct heap_descriptor *hd, generic_ptr address, word pages_requested)
{
    struct page_log *log_page = hd->pages->log_page;

    if (pages_requested == 0)
	return;

    if (log_page)
    {
	int i = hd->pages->log_idx;
	if (address == (generic_ptr)
		((char *) log_page[i].addr + log_page[i].npages*BYTES_PER_PAGE))
	{
	    /* adjacent to previous logged allocation, amend the record */
	    log_page[i].npages += pages_requested;
	    return;
	}
	if (++i < BYTES_PER_PAGE/sizeof(struct page_log))
	{
	    /* create a new log entry in same block */
	    hd->pages->log_idx = i;
	    log_page[i].addr = address;
	    log_page[i].npages = pages_requested;
	    return;
	}
    }

    /* allocate a new auxiliary page for the log, and initialise */
    hd->pages->log_page = (struct page_log*) _alloc_aux_page(hd);
    hd->pages->log_idx = 1;
    hd->pages->log_page[0].addr = (generic_ptr) log_page;	/* link to previous */
    hd->pages->log_page[0].npages = 0;
    hd->pages->log_page[1].addr = address;
    hd->pages->log_page[1].npages = pages_requested;
    return;
}


static void
_release_logged_pages(struct heap_descriptor *hd)
{
    struct page_log *log_page = hd->pages->log_page;
    int max = hd->pages->log_idx;
    while (log_page)
    {
	int i;
	struct page_log *old_log_page;
	/* free all the logged page clusters in this log page */
	for(i=max; i>=1; --i)
	{
	    (void) hd->less(log_page[i].addr, log_page[i].npages*BYTES_PER_PAGE, hd);
	    hd->pages->allocated -= log_page[i].npages;
	}
	/* free the log page itself, and proceed to previous one */
	old_log_page = log_page;
	log_page = (struct page_log *) log_page[0].addr;
	_release_aux_page(hd, old_log_page);
	max = BYTES_PER_PAGE/sizeof(struct page_log) - 1;
    }
}


/*
 * Allocate memory in units of pages. The second argument returns the
 * amount of memory that was actually allocated: at least the amount
 * requested, rounded up to the next multiple of the page size.
 */

generic_ptr
alloc_pagewise(
	struct heap_descriptor *hd,
	word	bytes_needed,
	word	*out_bytes_allocated)
{
    register struct cluster **cluster_list;
    register struct cluster *cluster;
    word bytes_allocated, pages_needed;
#ifdef USE_BITMAPS
    int block;
    bits32 *p, mask;
#endif
    struct page_admin *pages = hd->pages;

    *out_bytes_allocated = bytes_allocated = RoundTo(bytes_needed, BYTES_PER_PAGE);
    pages_needed = bytes_allocated/BYTES_PER_PAGE;

#if (DEBUG_HEAP > 1)
    fprintf(stderr, "alloc %ld pages\n", (long) pages_needed);
#endif

    if (pages_needed < PAGE_LISTS)
    {
	if (pages->free[pages_needed])	/* a cluster that fits exactly */
	{
	    pages->freed -= pages_needed;
	    cluster = pages->free[pages_needed];
	    pages->free[pages_needed] = cluster->next;	/* remove from free list */
#ifdef USE_BITMAPS
	    Page_Parameters(cluster->addr, block, p, mask);
	    while (pages_needed--)			/* update the bitmap */
	    {
		*p &= ~mask;
		Next_Bit(block, p, mask);
	    }
#endif
	    return cluster->addr;
	}

	/* no exact fit, set cluster_list to a free list with larger clusters */
	cluster_list = &pages->free[0];	/* try the default list first */
	if (! (*cluster_list))		/* else try any other larger cluster */
	{
	    word list_index = pages_needed;
	    while (++list_index < PAGE_LISTS)
	    {
		if (pages->free[list_index])		/* found one */
		{
		    cluster_list = &pages->free[list_index];
		    break;
		}
	    }
	}
    }
    else	/* very large block requested, try the default list */
    {
	cluster_list = &pages->free[0];
    }

    /*
     * look for a sufficiently large cluster (at least pages_needed)
     * in cluster_list (which may be empty)
     */
    while ((cluster = *cluster_list))
    {
	if (cluster->size >= pages_needed)
	{
	    pages->freed -= pages_needed;
	    *cluster_list = cluster->next;	/* remove from free list */
	    if (cluster->size > pages_needed)	/* put back the rest, if any */
	    {
		_add_to_list(hd, (generic_ptr)
			((char *) cluster->addr + pages_needed*BYTES_PER_PAGE),
			cluster->size - pages_needed);
	    }
#ifdef USE_BITMAPS
	    Page_Parameters(cluster->addr, block, p, mask);
	    while (pages_needed--)		/* update the bitmap */
	    {
		*p &= ~mask;
		Next_Bit(block, p, mask);
	    }
#endif
	    return cluster->addr;
	}
	cluster_list = &cluster->next;
    }

    /*
     * Nothing appropriate in our free lists,
     * get a sufficiently large cluster from the operating system
     */
    {
	register generic_ptr address;
	word pages_requested, bytes_requested;

	/* allocate pages_needed, but at least MIN_OS_PAGE_REQUEST */
	pages_requested = pages_needed >= MIN_OS_PAGE_REQUEST ?
		pages_needed : MIN_OS_PAGE_REQUEST;

	address = hd->more(pages_requested*BYTES_PER_PAGE, BYTES_PER_PAGE, hd);
	if (address == OUT_OF_HEAP && pages_requested > pages_needed)
	{
	    pages_requested = pages_needed;
	    address = hd->more(pages_requested*BYTES_PER_PAGE, BYTES_PER_PAGE, hd);
	}
	if (address == OUT_OF_HEAP)
	{
	    if (hd->panic)
	    {
		Unlock_Heap(hd);
		(*hd->panic)("Out of swap space", "heap allocation");
	    }
	    return (generic_ptr) 0;
	}
	if ((word) address % BYTES_PER_PAGE != 0)
	{
	    _print("SHM: misaligned pointer returned from OS\n");
	}

	/* update some statistics */
	_log_more_pages(hd, address, pages_requested);
	pages->allocated += pages_requested;
	bytes_requested = pages_requested*BYTES_PER_PAGE;
	if (!pages->min_addr || address < pages->min_addr)
	    pages->min_addr = address;
	if (!pages->max_addr ||
	    (generic_ptr)((char*)address + bytes_requested) > pages->max_addr)
	    pages->max_addr = (generic_ptr)((char*)address + bytes_requested);

	/* put excess pages in the free list */
	if (pages_requested > pages_needed)
	{
	    free_pages(hd, (generic_ptr) ((char *) address + bytes_allocated),
		    pages_requested - pages_needed);
	}
	return address;
    }
}

/*
 * Allocate a single page (of size BYTES_PER_PAGE)
 */

generic_ptr
alloc_page(struct heap_descriptor *hd)
{
    word dummy;
    return alloc_pagewise(hd, BYTES_PER_PAGE, &dummy);
}



/*---------------------------------------------------------------------
 * Heap allocator with special handling of small blocks
 * We keep a number of separate free lists and never split larger blocks!
 * Allocations larger than half the page size are done pagewise.
 *---------------------------------------------------------------------*/
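/*
 * Size class example (illustrative; the actual limits depend on the
 * constants in memman.h).  Assuming BYTES_PER_UNIT == 8: a 20-byte
 * request is rounded up to 3 units and served from small_blocks[3]; a
 * request between LARGEST_SMALL_BLOCK and LARGEST_POWER_BLOCK units is
 * rounded up to the next power of two and served from powers[]; anything
 * larger goes directly to alloc_pagewise().
 */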


/* derived constants */

#define Units(n)	(((n)+(BYTES_PER_UNIT-1)) / BYTES_PER_UNIT)
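/* e.g. assuming BYTES_PER_UNIT == 8: Units(1) == Units(8) == 1, Units(9) == 2 */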

#ifdef DEBUG_HEAP
int alloc_stop;
#endif

void
alloc_init(struct heap_descriptor *hd)
{
    int	i;
    struct heap *heap = hd->heap;
    for (i=0; i <= LARGEST_SMALL_BLOCK; i++)
    {
	heap->small_blocks[i] = (generic_ptr) 0;
	heap->small_allocated[i] = 0;
    }
    for (i=0; i < POWERS; i++)
    {
	heap->powers[i] = (generic_ptr) 0;
	heap->powers_allocated[i] = 0;
    }
    heap->alloc_ptr = alloc_page(hd);
    heap->alloc_free = UNITS_PER_PAGE;
    heap->requested = 0;
    heap->used = 0;
    heap->allocs = 0;
    heap->small_block_pages = 1;
    heap->power_pages = 0;
    hd->debug_level = 0;
}

void
alloc_debug_level(struct heap_descriptor *hd, int debug_level)
{
    hd->debug_level = debug_level;
}

generic_ptr
alloc_size(struct heap_descriptor *hd, word bytes_needed)
{
    register word units_needed = Units(bytes_needed);
    generic_ptr ptr;
    struct heap *heap = hd->heap;

    Lock_Heap(hd);

#ifdef DEBUG_HEAP
    heap->requested += bytes_needed;
    if (++heap->allocs == alloc_stop)
	alloc_stop++;
#endif
    if (units_needed <= LARGEST_SMALL_BLOCK)	/* perfect fit algorithm */
    {
	heap->used += units_needed;
	ptr = heap->small_blocks[units_needed];
	heap->small_allocated[units_needed]++;
	if (ptr)			/* we have one in the free list */
	{
	    heap->small_blocks[units_needed] = *((generic_ptr *) ptr);
	}
	else
	{
	    if (units_needed > heap->alloc_free) /* allocation block exhausted */
	    {
		if (heap->alloc_free)	/* put the rest into a free list */
		{
		    * ((generic_ptr *) heap->alloc_ptr) =
				heap->small_blocks[heap->alloc_free];
		    heap->small_blocks[heap->alloc_free] = heap->alloc_ptr;
		}
		heap->alloc_ptr = alloc_page(hd);
		heap->small_block_pages++;
		heap->alloc_free = UNITS_PER_PAGE;
	    }
	    ptr = heap->alloc_ptr;	/* allocate from the current block */
	    heap->alloc_free -= units_needed;
	    heap->alloc_ptr = (generic_ptr)
		((char *) heap->alloc_ptr + units_needed*BYTES_PER_UNIT);
	}
    }
    else if (units_needed <= LARGEST_POWER_BLOCK) /* allocate in powers of 2 */
    {
	register int index = POWER_FIRST_INDEX;
	register int blocksize = SMALLEST_POWER_BLOCK;

	while (units_needed > blocksize)
	{
	    blocksize <<= 1;
	    index++;
	}
	heap->used += blocksize;
	ptr = heap->powers[index];
	heap->powers_allocated[index]++;
	if (ptr)			/* we have one in the free list */
	{
	    heap->powers[index] = *((generic_ptr *) ptr);
	}
	else if (blocksize <= heap->alloc_free)	/* get from allocation block */
	{
	    ptr = heap->alloc_ptr;
	    heap->alloc_free -= blocksize;
	    heap->alloc_ptr = (generic_ptr)
		((char *) heap->alloc_ptr + blocksize*BYTES_PER_UNIT);
	}
	else	/* get a fresh page and split into blocks of blocksize */
	{
	    unit_type *initptr;
	    ptr = alloc_page(hd);
	    heap->power_pages++;
	    initptr = (unit_type *) ptr + UNITS_PER_PAGE - blocksize;
	    *(unit_type **) initptr = (unit_type *) 0;
	    initptr -= blocksize;
	    while ((generic_ptr) initptr > ptr)
	    {
		*(unit_type **) initptr = initptr + blocksize;
		initptr -= blocksize;
	    }
	    heap->powers[index] = (generic_ptr)(initptr + blocksize);
	}
    }
    else					/* allocate pagewise */
    {
	word bytes_allocated;
	ptr = alloc_pagewise(hd, bytes_needed, &bytes_allocated);
    }

    Unlock_Heap(hd);
    return ptr;
}

void
free_size(struct heap_descriptor *hd, generic_ptr ptr, word size)
{
    register word units = Units(size);
    struct heap *heap = hd->heap;

    Lock_Heap(hd);

#ifdef CHECK_HEAP
    _check_address(hd, ptr, size);

    if (hd->debug_level > 0)
    {
	memset(ptr, 0, size);	/* zero out the freed memory */
    }
#endif

#ifdef DEBUG_HEAP
    heap->requested -= size;
#endif
    if (units <= LARGEST_SMALL_BLOCK)		/* perfect fit algorithm */
    {
	heap->used -= units;
	* ((generic_ptr *) ptr) = heap->small_blocks[units];
	heap->small_blocks[units] = ptr;
	heap->small_allocated[units]--;
    }
    else if (units <= LARGEST_POWER_BLOCK)	/* powers of 2 algorithm */
    {
	register int index = POWER_FIRST_INDEX;
	register int blocksize = SMALLEST_POWER_BLOCK;

	while (units > blocksize)
	{
	    blocksize <<= 1;
	    index++;
	}
	heap->used -= blocksize;
	* ((generic_ptr *) ptr) = heap->powers[index];
	heap->powers[index] = ptr;
	heap->powers_allocated[index]--;
    }
    else					/* pagewise allocation */
    {
	word pages_allocated = (size-1)/BYTES_PER_PAGE + 1;
	free_pages(hd, ptr, pages_allocated);
    }
    Unlock_Heap(hd);
}

/* return the actual size of a memory block */

static word
_true_size(word size)
{
    register word units = Units(size);
    if (units <= LARGEST_SMALL_BLOCK)		/* perfect fit algorithm */
	return units*BYTES_PER_UNIT;
    else if (units <= LARGEST_POWER_BLOCK)	/* powers of 2 algorithm */
    {
	register int blocksize = SMALLEST_POWER_BLOCK;
	while (units > blocksize)
	    blocksize <<= 1;
	return blocksize*BYTES_PER_UNIT;
    }
    else					/* pagewise allocation */
    {
	return RoundTo(size, BYTES_PER_PAGE);
    }
}

generic_ptr
realloc_size(
	struct heap_descriptor *hd,
	generic_ptr ptr,
	word oldsize,
	word newsize)
{
    if (_true_size(oldsize) == _true_size(newsize))
    {
	return ptr;	/* already the right size */
    }
    else		/* grow or shrink */
    {
	generic_ptr new_ptr = alloc_size(hd, newsize);
	if (new_ptr)
	{
	    bcopy((char *) ptr, (char *) new_ptr,
		newsize < oldsize ? newsize : oldsize);
	    free_size(hd, ptr, oldsize);
	}
	return new_ptr;
    }
}


/*---------------------------------------------------------------------
 * Allocate memory that remembers its own size
 * We need a double header for alignment.
 * It also gives us space for a magic number.
 *---------------------------------------------------------------------*/
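/*
 * HEADER is presumably defined in memman.h so that its s.size and
 * s.magic fields fit in one double-word header, keeping the user block
 * that follows it maximally aligned.  The magic field stores the owning
 * heap pointer, which lets h_free() and h_realloc() detect blocks that
 * were not allocated with h_alloc().
 */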

generic_ptr
h_alloc(struct heap_descriptor *hd, word size)
{
    HEADER *ptr;
    if (!(ptr = (HEADER*) alloc_size(hd, size + sizeof(HEADER))))
	return (generic_ptr) 0;
    ptr->s.size = size;
    ptr->s.magic = hd->heap;
    return (generic_ptr)(ptr + 1);
}

void
h_free(struct heap_descriptor *hd, generic_ptr ptr)
{
    HEADER *h;
    if (!ptr)
	return;

    h = (HEADER*) ptr - 1;
    if (h->s.magic != hd->heap)
    {
	_print("SHM: invalid header in h_free()\n");
	return;
    }
    h->s.magic = (struct heap *) 0;
    free_size(hd, (generic_ptr) h, h->s.size + sizeof(HEADER));
}

generic_ptr
h_realloc(struct heap_descriptor *hd, generic_ptr ptr, word newsize)
{
    HEADER *h;
    word oldsize;

    if (!ptr)
	return h_alloc(hd, newsize);

    h = (HEADER*) ptr - 1;
    oldsize = h->s.size;

    if (h->s.magic != hd->heap)
    {
	_print("SHM: invalid header in h_realloc()\n");
	return ptr;
    }
    h = (HEADER*) realloc_size(hd, (generic_ptr) h,
	oldsize + sizeof(HEADER), newsize + sizeof(HEADER));
    if (!h)
	return (generic_ptr) 0;	/* out of memory, old block unchanged */
    h->s.size = newsize;
    return (generic_ptr) (h + 1);
}


/*---------------------------------------------------------------------
 * Debugging and statistics
 *---------------------------------------------------------------------*/

#define FullyUsedPages(hd) (hd->pages->allocated - hd->pages->freed \
	    - hd->heap->small_block_pages - hd->heap->power_pages)

int
alloc_statistics(struct heap_descriptor *hd, int what)
{
    switch(what)
    {
    case HEAP_STAT_ALLOCATED:
	return hd->pages->allocated * BYTES_PER_PAGE;
    case HEAP_STAT_USED:
#ifdef DEBUG_HEAP
	pr_heap(hd);
#endif
	return FullyUsedPages(hd) * BYTES_PER_PAGE
		+ hd->heap->used * BYTES_PER_UNIT;
    default:
	return 0;
    }
}

#ifdef DEBUG_HEAP

void
pr_heap(struct heap_descriptor *hd)
{
    int	i, j, blocksize;
    generic_ptr p;
    struct cluster *cl;
    struct page_admin *pages = hd->pages;
    struct heap *heap = hd->heap;

    p_fprintf(current_err_, "\nSMALL BLOCK MANAGEMENT:\n");
    for (i=1; i <= LARGEST_SMALL_BLOCK; i++)
    {
	for (j=0, p = heap->small_blocks[i]; p; p = *(generic_ptr *)p, j++)
		;
	p_fprintf(current_err_, "%10d byte blocks: %4d allocated, %4d free\n",
		i*BYTES_PER_UNIT, heap->small_allocated[i], j);
    }

    for (i=POWER_FIRST_INDEX, blocksize = SMALLEST_POWER_BLOCK;
	blocksize <= LARGEST_POWER_BLOCK;
	i++, blocksize <<= 1)
    {
	for (j=0, p = heap->powers[i]; p; p = *(generic_ptr *)p, j++)
		;
	p_fprintf(current_err_, "%10u byte blocks: %4d allocated, %4d free\n",
		blocksize*BYTES_PER_UNIT, heap->powers_allocated[i], j);
    }
    p_fprintf(current_err_, "%10u byte chunk free\n", heap->alloc_free * BYTES_PER_UNIT);
    p_fprintf(current_err_, "    #allocs   = %d\n", heap->allocs);
    p_fprintf(current_err_, "    requested = %d bytes\n", heap->requested);
    p_fprintf(current_err_, "    used      = %d bytes\n", heap->used * BYTES_PER_UNIT);
    p_fprintf(current_err_, "    allocated = %d bytes",
    	(heap->small_block_pages + heap->power_pages)*BYTES_PER_PAGE);
    p_fprintf(current_err_, " = %d small pages + %d power pages\n",
	heap->small_block_pages, heap->power_pages);

    p_fprintf(current_err_, "\nPAGE (%d bytes) MANAGEMENT:\n", BYTES_PER_PAGE);
    blocksize = 0;
    for (i=1; i<PAGE_LISTS; i++)
    {
	for (j=0, cl = pages->free[i]; cl; cl = cl->next, j++)
	    blocksize += i;
	if (j)
	    p_fprintf(current_err_, "%10d %4d-page clusters free\n", j, i);
    }
    for (cl = pages->free[0]; cl; cl = cl->next)
    {
	p_fprintf(current_err_, "%10d %4d-page cluster free\n", 1, cl->size);
	blocksize += cl->size;
    }

    p_fprintf(current_err_, "\nTOTAL:\n");
    p_fprintf(current_err_, "    used      = %d bytes\n",
	FullyUsedPages(hd) * BYTES_PER_PAGE + hd->heap->used * BYTES_PER_UNIT);
    p_fprintf(current_err_, "    allocated = %d bytes\n",
    	hd->pages->allocated * BYTES_PER_PAGE);
    p_fprintf(current_err_, "    pages     = %d (%d small + %d power + %d whole + %d free)\n",
	pages->allocated, heap->small_block_pages, heap->power_pages,
	pages->allocated - heap->small_block_pages - heap->power_pages - pages->freed,
	pages->freed);

    ec_flush(current_err_);
}

#endif /* DEBUG_HEAP */
