/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2012 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                 Eclipse Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*          http://www.eclipse.org/org/documents/epl-v10.html           *
*         (with md5 checksum b35adb5213ca9657e911e9befb180842)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmbest(){}

#else

#include	"vmhdr.h"

/*	Best-fit allocation method. This is based on a best-fit strategy
**	using a splay tree for storage of lists of free blocks of the same
**	size. Recent free blocks may be cached for fast reuse.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
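/* A rough sketch of how free storage is organized here, as reflected in the
** code below (see vmhdr.h for the exact macro definitions):
**	- TINY(vd)[]: lists of free blocks smaller than MAXTINY, indexed by size.
**	- vd->root: a splay tree of free blocks keyed on size; blocks of equal
**	  size are chained off a tree node via their LINK fields.
**	- CACHE(vd)[]: lists of recently freed ("junk") blocks that stay marked
**	  busy until bestreclaim() coalesces them and moves them into the tree;
**	  CACHE(vd)[S_CACHE] is the catch-all list.
**	- vd->free: the most recently freed block, kept aside for fast reuse.
**	- vd->wild: the wilderness block bordering unallocated arena space.
*/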

#ifdef DEBUG
static int	N_free;		/* # of free calls			*/
static int	N_alloc;	/* # of alloc calls			*/
static int	N_resize;	/* # of resize calls			*/
static int	N_wild;		/* # allocated from the wild block	*/
static int	N_last;		/* # allocated from last free block	*/
static int	N_reclaim;	/* # of bestreclaim calls		*/
#endif /*DEBUG*/

#define COMPACT		8	/* factor to decide when to compact	*/

/* Check to see if a block is in the free tree */
#if __STD_C
static int vmintree(Block_t* node, Block_t* b)
#else
static int vmintree(node,b)
Block_t*	node;
Block_t*	b;
#endif
{	Block_t*	t;

	for(t = node; t; t = LINK(t))
		if(t == b)
			return 1;
	if(LEFT(node) && vmintree(LEFT(node),b))
		return 1;
	if(RIGHT(node) && vmintree(RIGHT(node),b))
		return 1;
	return 0;
}

#if __STD_C
static int vmonlist(Block_t* list, Block_t* b)
#else
static int vmonlist(list,b)
Block_t*	list;
Block_t*	b;
#endif
{
	for(; list; list = LINK(list))
		if(list == b)
			return 1;
	return 0;
}

/* Check to see if a block is known to be free */
#if __STD_C
static int vmisfree(Vmdata_t* vd, Block_t* b)
#else
static int vmisfree(vd,b)
Vmdata_t*	vd;
Block_t*	b;
#endif
{
	if(SIZE(b) & (BUSY|JUNK|PFREE))
		return 0;

	if(b == vd->wild)
		return 1;

	if(SIZE(b) < MAXTINY)
		return vmonlist(TINY(vd)[INDEX(SIZE(b))], b);

	if(vd->root)
		return vmintree(vd->root, b);

	return 0;
}

/* Check to see if a block is known to be junked */
#if __STD_C
static int vmisjunk(Vmdata_t* vd, Block_t* b)
#else
static int vmisjunk(vd,b)
Vmdata_t*	vd;
Block_t*	b;
#endif
{
	Block_t*	t;

	if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0)
		return 0;

	if(b == vd->free) /* recently freed */
		return 1;

	/* check the list that b is supposed to be in */
	for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t))
		if(t == b)
			return 1;

	/* on occasion, b may be put onto the catch-all list */
	if(C_INDEX(SIZE(b)) < S_CACHE)
		for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
			if(t == b)
				return 1;

	return 0;
}

/* check to see if the free tree is in good shape */
#if __STD_C
static int vmchktree(Block_t* node)
#else
static int vmchktree(node)
Block_t*	node;
#endif
{	Block_t*	t;

	if(SIZE(node) & BITS)
		{ /**/ASSERT(0); return -1; }

	for(t = LINK(node); t; t = LINK(t))
		if(SIZE(t) != SIZE(node))
			{ /**/ASSERT(0); return -1; }

	if((t = LEFT(node)) )
	{	if(SIZE(t) >= SIZE(node) )
			{ /**/ASSERT(0); return -1; }
		else	return vmchktree(t);
	}
	if((t = RIGHT(node)) )
	{	if(SIZE(t) <= SIZE(node) )
			{ /**/ASSERT(0); return -1; }
		else	return vmchktree(t);
	}

	return 0;
}

#if __STD_C
int _vmbestcheck(Vmdata_t* vd, Block_t* freeb)
#else
int _vmbestcheck(vd, freeb)
Vmdata_t*	vd;
Block_t*	freeb; /* known to be free but not on any free list */
#endif
{
	reg Seg_t	*seg;
	reg Block_t	*b, *endb, *nextb;
	int		rv = 0;

	if(!CHECK())
		return 0;

	/* make sure the free tree is still in shape */
	if(vd->root && vmchktree(vd->root) < 0 )
		{ rv = -1; /**/ASSERT(0); }

	for(seg = vd->seg; seg && rv == 0; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		for(; b < endb && rv == 0; b = nextb)
		{	nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );

			if(!ISBUSY(SIZE(b)) ) /* a completely free block */
			{	/* there should be no marked bits of any type */
				if(SIZE(b) & (BUSY|JUNK|PFREE) )
					{ rv = -1; /**/ASSERT(0); }

				/* next block must be busy and marked PFREE */
				if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) )
					{ rv = -1; /**/ASSERT(0); }

				/* must have a self-reference pointer */
				if(SELF(b) != b)
					{ rv = -1; /**/ASSERT(0); }

				/* segment pointer should be well-defined */
				if(!TINIEST(b) && SEG(b) != seg)
					{ rv = -1; /**/ASSERT(0); }

				/* must be on a free list */
				if(b != freeb && !vmisfree(vd, b) )
					{ rv = -1; /**/ASSERT(0); }
			}
			else
			{	/* segment pointer should be well-defined */
				if(SEG(b) != seg)
					{ rv = -1; /**/ASSERT(0); }

				/* next block should not be marked PFREE */
				if(ISPFREE(SIZE(nextb)) )
					{ rv = -1; /**/ASSERT(0); }

				/* if PFREE, last block should be free */
				if(ISPFREE(SIZE(b)) && LAST(b) != freeb &&
				   !vmisfree(vd, LAST(b)) )
					{ rv = -1; /**/ASSERT(0); }

				/* if free but unreclaimed, should be junk */
				if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b))
					{ rv = -1; /**/ASSERT(0); }
			}
		}
	}

	return rv;
}

/* Tree rotation functions */
#define RROTATE(x,y)	(LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
#define LROTATE(x,y)	(RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
#define RLINK(s,x)	((s) = LEFT(s) = (x))
#define LLINK(s,x)	((s) = RIGHT(s) = (x))
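
/* bestsearch() below performs a top-down splay: as it descends from vd->root
** looking for a block of exactly the requested size, RROTATE/LROTATE lift
** promising children while RLINK/LLINK peel off the subtrees already known to
** be too large or too small, hanging them off the stack variable `link`
** (larger blocks accumulate under LEFT(&link), smaller ones under RIGHT(&link)).
** If no exact fit exists, the smallest block in the "too large" tree is taken,
** which is what makes the allocation best-fit.
*/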

/* Find and delete a suitable element in the free tree. */
#if __STD_C
static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted)
#else
static Block_t* bestsearch(vd, size, wanted)
Vmdata_t*	vd;
reg size_t	size;
Block_t*	wanted;
#endif
{
	reg size_t	s;
	reg Block_t	*t, *root, *l, *r;
	Block_t		link;

	/* extracting a tiniest block from its list */
	if((root = wanted) && size == TINYSIZE)
	{	reg Seg_t*	seg;

		l = TLEFT(root);
		if((r = LINK(root)) )
			TLEFT(r) = l;
		if(l)
			LINK(l) = r;
		else	TINY(vd)[0] = r;

		seg = vd->seg;
		if(!seg->next)
			SEG(root) = seg;
		else for(;; seg = seg->next)
		{	if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr &&
			   (Vmuchar_t*)root < seg->baddr)
			{	SEG(root) = seg;
				break;
			}
		}

		return root;
	}

	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);

	/* find the right one to delete */
	l = r = &link;
	if((root = vd->root) ) do
	{	/**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
		if(size == (s = SIZE(root)) )
			break;
		if(size < s)
		{	if((t = LEFT(root)) )
			{	if(size <= (s = SIZE(t)) )
				{	RROTATE(root,t);
					if(size == s)
						break;
					t = LEFT(root);
				}
				else
				{	LLINK(l,t);
					t = RIGHT(t);
				}
			}
			RLINK(r,root);
		}
		else
		{	if((t = RIGHT(root)) )
			{	if(size >= (s = SIZE(t)) )
				{	LROTATE(root,t);
					if(size == s)
						break;
					t = RIGHT(root);
				}
				else
				{	RLINK(r,t);
					t = LEFT(t);
				}
			}
			LLINK(l,root);
		}
		/**/ ASSERT(root != t);
	} while((root = t) );

	if(root)	/* found it, now isolate it */
	{	RIGHT(l) = LEFT(root);
		LEFT(r) = RIGHT(root);
	}
	else		/* nothing exactly fit	*/
	{	LEFT(r) = NIL(Block_t*);
		RIGHT(l) = NIL(Block_t*);

		/* grab the least one from the right tree */
		if((root = LEFT(&link)) )
		{	while((t = LEFT(root)) )
				RROTATE(root,t);
			LEFT(&link) = RIGHT(root);
		}
	}

	if(root && (r = LINK(root)) )
	{	/* head of a linked list; use the next one as the root */
		LEFT(r) = RIGHT(&link);
		RIGHT(r) = LEFT(&link);
	}
	else if(!(r = LEFT(&link)) )
		r = RIGHT(&link);
	else /* graft left tree to right tree */
	{	while((t = LEFT(r)) )
			RROTATE(r,t);
		LEFT(r) = RIGHT(&link);
	}
	vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r)));

	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
	/**/ASSERT(!wanted || wanted == root);

	return root;
}

/* Reclaim all delayed free blocks into the free tree */
#if __STD_C
static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c)
#else
static int bestreclaim(vd, wanted, c)
reg Vmdata_t*	vd;
Block_t*	wanted;
int		c;
#endif
{
	reg size_t	size, s;
	reg Block_t	*fp, *np, *t, *list;
	reg int		n, saw_wanted;

	/**/COUNT(N_reclaim);
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	if((fp = vd->free) )
	{	LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp;
		vd->free = NIL(Block_t*);
	}

	saw_wanted = wanted ? 0 : 1;
	for(n = S_CACHE; n >= c; --n)
	{	list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*);
		while((fp = list) )
		{	/* Note that below here we allow ISJUNK blocks to be
			** forward-merged even though they are not removed from
			** the list immediately. In this way, the list is
			** scanned only once. It works because the LINK and SIZE
			** fields are not destroyed during the merging. This can
			** be seen by observing that a tiniest block has a 2-word
			** header and a 2-word body. Merging a tiniest block
			** (1seg) and the next block (2seg) looks like this:
			**	1seg  size  link  left  2seg size link left ....
			**	1seg  size  link  left  rite xxxx xxxx .... self
			** After the merge, the 2seg word is replaced by the RIGHT
			** pointer of the new block and somewhere beyond the
			** two xxxx fields, the SELF pointer will replace some
			** other word. The important part is that the two xxxx
			** fields are kept intact.
			*/
			list = LINK(list); /**/ASSERT(!vmonlist(list,fp));

			size = SIZE(fp);
			if(!ISJUNK(size))	/* already done */
				continue;

			if(ISPFREE(size))	/* backward merge */
			{	fp = LAST(fp);
				s = SIZE(fp); /**/ASSERT(!(s&BITS));
				REMOVE(vd,fp,INDEX(s),t,bestsearch);
				size = (size&~BITS) + s + sizeof(Head_t);
			}
			else	size &= ~BITS;

			for(;;)	/* forward merge */
			{	np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t));
				s = SIZE(np);	/**/ASSERT(s > 0);
				if(!ISBUSY(s))
				{	/**/ASSERT((s&BITS) == 0);
					if(np == vd->wild)
						vd->wild = NIL(Block_t*);
					else	REMOVE(vd,np,INDEX(s),t,bestsearch);
				}
				else if(ISJUNK(s))
				{	/* reclaim any touched junk list */
					if((int)C_INDEX(s) < c)
						c = (int)C_INDEX(s);
					SIZE(np) = 0;
					CLRBITS(s);
				}
				else	break;
				size += s + sizeof(Head_t);
			}
			SIZE(fp) = size;

			/* tell next block that this one is free */
			np = NEXT(fp);	/**/ASSERT(ISBUSY(SIZE(np)));
					/**/ASSERT(!ISJUNK(SIZE(np)));
			SETPFREE(SIZE(np));
			SELF(fp) = fp;

			if(fp == wanted) /* to be consumed soon */
			{	/**/ASSERT(!saw_wanted); /* should be seen just once */
				saw_wanted = 1;
				continue;
			}

			/* wilderness preservation */
			if(np->body.data >= vd->seg->baddr)
			{	vd->wild = fp;
				continue;
			}

			/* tiny block goes to tiny list */
			if(size < MAXTINY)
			{	s = INDEX(size);
				np = LINK(fp) = TINY(vd)[s];
				if(s == 0)	/* TINIEST block */
				{	if(np)
						TLEFT(np) = fp;
					TLEFT(fp) = NIL(Block_t*);
				}
				else
				{	if(np)
						LEFT(np)  = fp;
					LEFT(fp) = NIL(Block_t*);
					SETLINK(fp);
				}
				TINY(vd)[s] = fp;
				continue;
			}

			LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*);
			if(!(np = vd->root) )	/* inserting into an empty tree	*/
			{	vd->root = fp;
				continue;
			}

			size = SIZE(fp);
			while(1)	/* leaf insertion */
			{	/**/ASSERT(np != fp);
				if((s = SIZE(np)) > size)
				{	if((t = LEFT(np)) )
					{	/**/ ASSERT(np != t);
						np = t;
					}
					else
					{	LEFT(np) = fp;
						break;
					}
				}
				else if(s < size)
				{	if((t = RIGHT(np)) )
					{	/**/ ASSERT(np != t);
						np = t;
					}
					else
					{	RIGHT(np) = fp;
						break;
					}
				}
				else /* s == size */
				{	if((t = LINK(np)) )
					{	LINK(fp) = t;
						LEFT(t) = fp;
					}
					LINK(np) = fp;
					LEFT(fp) = np;
					SETLINK(fp);
					break;
				}
			}
		}
	}

	/**/ASSERT(!wanted || saw_wanted == 1);
	/**/ASSERT(_vmbestcheck(vd, wanted) == 0);
	return saw_wanted;
}

#if __STD_C
static int bestcompact(Vmalloc_t* vm, int local)
#else
static int bestcompact(vm, local)
Vmalloc_t*	vm;
int		local;
#endif
{
	reg Seg_t	*seg, *next;
	reg Block_t	*bp, *tp;
	reg size_t	size, segsize, round;
	reg Vmdata_t*	vd = vm->data;

	SETLOCK(vm, local);

	bestreclaim(vd,NIL(Block_t*),0);

	for(seg = vd->seg; seg; seg = next)
	{	next = seg->next;

		bp = BLOCK(seg->baddr);
		if(!ISPFREE(SIZE(bp)) )
			continue;

		bp = LAST(bp);	/**/ASSERT(vmisfree(vd,bp));
		size = SIZE(bp);
		if(bp == vd->wild)
		{	/* During large block allocations, _Vmextend may
			** have enlarged the rounding factor. Reducing it
			** a bit helps avoid acquiring overly large chunks
			** of raw memory.
			*/
			if((round = vm->disc->round) == 0)
				round = _Vmpagesize;
			if(size > COMPACT*vd->incr && vd->incr > round)
				vd->incr /= 2;

			/* for the bottom segment, we don't necessarily want
			** to return raw memory too early. vd->pool has an
			** approximation of the average size of recently freed
			** blocks. If this is large, the application is managing
			** large blocks so we throttle back memory chopping
			** to avoid thrashing the underlying memory system.
			*/
			if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool)
				continue;

			vd->wild = NIL(Block_t*);
			vd->pool = 0;
		}
		else	REMOVE(vd,bp,INDEX(size),tp,bestsearch);
		tp = NEXT(bp); /* avoid strict-aliasing pun */
		CLRPFREE(SIZE(tp));

		if(size < (segsize = seg->size))
			size += sizeof(Head_t);

		if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0)
		{	if(size >= segsize) /* entire segment deleted */
				continue;
			/**/ASSERT(SEG(BLOCK(seg->baddr)) == seg);

			if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0)
				SIZE(bp) = size - sizeof(Head_t);
			else	bp = NIL(Block_t*);
		}

		if(bp)
		{	/**/ ASSERT(SIZE(bp) >= BODYSIZE);
			/**/ ASSERT(SEGWILD(bp));
			/**/ ASSERT(!vd->root || !vmintree(vd->root,bp));
			SIZE(bp) |= BUSY|JUNK;
			LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
			CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
		}
	}

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);

	CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	return 0;
}

#if __STD_C
static Void_t* bestalloc(Vmalloc_t* vm, size_t size, int local)
#else
static Void_t* bestalloc(vm, size, local)
Vmalloc_t*	vm;	/* region allocating from	*/
size_t		size;	/* desired block size		*/
int		local;	/* internal call		*/
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg size_t	s;
	reg int		n;
	reg Block_t	*tp, *np, *ap;
	size_t		orgsize = size;

	/**/COUNT(N_alloc);
	/**/ASSERT(local ? (vd->lock == 1) : 1 );

	SETLOCK(vm,local);

	/**/ ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	/**/ ASSERT(HEADSIZE == sizeof(Head_t));
	/**/ ASSERT(BODYSIZE == sizeof(Body_t));
	/**/ ASSERT((ALIGN%(BITS+1)) == 0 );
	/**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 );
	/**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 );
	/**/ ASSERT((BODYSIZE%ALIGN) == 0 );
	/**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) );

	/* for the ANSI requirement that malloc(0) return a non-NULL pointer */
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
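	/* For illustration (the actual constants are platform-dependent and
	** defined in vmhdr.h): if ALIGN were 16 and BODYSIZE 16, a request of
	** 0..16 bytes would become 16 and a request of 40 bytes would round
	** up to 48, so every block body stays a multiple of ALIGN.
	*/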

	if((tp = vd->free) )	/* reuse last free piece if appropriate */
	{	/**/ASSERT(ISBUSY(SIZE(tp)) );
		/**/ASSERT(ISJUNK(SIZE(tp)) );
		/**/COUNT(N_last);

		vd->free = NIL(Block_t*);
		if((s = SIZE(tp)) >= size && s < (size << 1) )
		{	if(s >= size + (sizeof(Head_t)+BODYSIZE) )
			{	SIZE(tp) = size;
				np = NEXT(tp);
				SEG(np) = SEG(tp);
				SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY;
				vd->free = np;
				SIZE(tp) |= s&BITS;
			}
			CLRJUNK(SIZE(tp));
			goto done;
		}

		LINK(tp) = CACHE(vd)[S_CACHE];
		CACHE(vd)[S_CACHE] = tp;
	}

	for(n = S_CACHE; n >= 0; --n)	/* best-fit except for coalescing */
	{	bestreclaim(vd,NIL(Block_t*),n);
		if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) )
			goto got_block;
	}

	/**/ASSERT(!vd->free);
	if((tp = vd->wild) && SIZE(tp) >= size)
	{	/**/COUNT(N_wild);
		vd->wild = NIL(Block_t*);
		goto got_block;
	}

	/* need to extend the arena */
	KPVCOMPACT(vm,bestcompact);
	if((tp = (*_Vmextend)(vm,size,bestsearch)) )
	{ got_block:
		/**/ ASSERT(!ISBITS(SIZE(tp)));
		/**/ ASSERT(SIZE(tp) >= size);
		/**/ ASSERT((SIZE(tp)%ALIGN) == 0);
		/**/ ASSERT(!vd->free);

		/* tell next block that we are no longer a free block */
		np = NEXT(tp);
		CLRPFREE(SIZE(np));	/**/ ASSERT(ISBUSY(SIZE(np)));

		if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) )
		{	SIZE(tp) = size;

			np = NEXT(tp);
			SEG(np) = SEG(tp);
			SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK;

			if(VMWILD(vd,np))
			{	SIZE(np) &= ~BITS;
				SELF(np) = np;
				ap = NEXT(np); /**/ASSERT(ISBUSY(SIZE(ap)));
				SETPFREE(SIZE(ap));
				vd->wild = np;
			}
			else	vd->free = np;
		}

		SETBUSY(SIZE(tp));
	}

done:
	if(tp && !local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0);

	CLRLOCK(vm,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	return tp ? DATA(tp) : NIL(Void_t*);
}

#if __STD_C
static long bestaddr(Vmalloc_t* vm, Void_t* addr, int local )
#else
static long bestaddr(vm, addr, local)
Vmalloc_t*	vm;	/* region allocating from	*/
Void_t*		addr;	/* address to check		*/
int		local;
#endif
{
	reg Seg_t*	seg;
	reg Block_t	*b, *endb;
	reg long	offset;
	reg Vmdata_t*	vd = vm->data;

	/**/ASSERT(local ? (vd->lock == 1) : 1 );
	SETLOCK(vm, local);

	offset = -1L; b = endb = NIL(Block_t*);
	for(seg = vd->seg; seg; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((Vmuchar_t*)addr > (Vmuchar_t*)b &&
		   (Vmuchar_t*)addr < (Vmuchar_t*)endb)
			break;
	}

	if(local ) /* from bestfree or bestresize */
	{	b = BLOCK(addr);
		if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) )
			offset = 0;
	}
	else if(seg)
	{	while(b < endb)
		{	reg Vmuchar_t*	data = (Vmuchar_t*)DATA(b);
			reg size_t	size = SIZE(b)&~BITS;

			if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size)
			{	if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
					offset = -1L;
				else	offset = (long)((Vmuchar_t*)addr - data);
				goto done;
			}

			b = (Block_t*)((Vmuchar_t*)DATA(b) + size);
		}
	}

done:
	CLRLOCK(vm,local);
	return offset;
}

#if __STD_C
static int bestfree(Vmalloc_t* vm, Void_t* data, int local )
#else
static int bestfree(vm, data, local )
Vmalloc_t*	vm;
Void_t*		data;
int		local;
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg Block_t	*bp;
	reg size_t	s;

#ifdef DEBUG
	if(((char*)data - (char*)0) <= 1)
	{	_Vmassert |= VM_check;
		_vmbestcheck(vd, NIL(Block_t*));
		if (!data)
			_Vmassert &= ~VM_check;
		return 0;
	}
#else
	if(!data) /* ANSI-ism */
		return 0;
#endif

	/**/COUNT(N_free);
	/**/ASSERT(local ? (vd->lock == 1) : 1 );

	SETLOCK(vm, local);

	/**/ASSERT(KPVADDR(vm, data, bestaddr) == 0);
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	bp = BLOCK(data); s = SIZE(bp);

	/* Keep an approximate average free block size.
	** This is used in bestcompact() to decide when to release
	** raw memory back to the underlying memory system.
	*/
	vd->pool = (vd->pool + (s&~BITS))/2;
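	/* The line above is an exponentially decaying average: each free
	** replaces the estimate with the average of the old estimate and the
	** newly freed size. For example, starting from pool == 0 and freeing
	** blocks of sizes 128, 256 and 512 (in that order) leaves pool at
	** 64, 160 and 336.
	*/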

	if(ISBUSY(s) && !ISJUNK(s))
	{	SETJUNK(SIZE(bp));
		if(s < MAXCACHE)
		{	/**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) );
			LINK(bp) = CACHE(vd)[INDEX(s)];
			CACHE(vd)[INDEX(s)] = bp;
		}
		else if(!vd->free)
			vd->free = bp;
		else
		{	/**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) );
			LINK(bp) = CACHE(vd)[S_CACHE];
			CACHE(vd)[S_CACHE] = bp;
		}

		/* coalesce on freeing large blocks to avoid fragmentation */
		if(SIZE(bp) >= 2*vd->incr)
		{	bestreclaim(vd,NIL(Block_t*),0);
			if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr)
				KPVCOMPACT(vm,bestcompact);
		}
	}

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST )
		(*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0);

	CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	return 0;
}

#if __STD_C
static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type, int local)
#else
static Void_t* bestresize(vm, data, size, type, local)
Vmalloc_t*	vm;		/* region allocating from	*/
Void_t*		data;		/* old block of data		*/
reg size_t	size;		/* new size			*/
int		type;		/* !=0 to move, <0 for not copy */
int		local;
#endif
{
	reg Block_t	*rp, *np, *t;
	size_t		s, bs;
	size_t		oldz = 0,  orgsize = size;
	Void_t		*oldd = 0, *orgdata = data;
	Vmdata_t	*vd = vm->data;

	/**/COUNT(N_resize);
	/**/ASSERT(local ? (vd->lock == 1) :  1);

	if(!data) /* resizing a NULL block is the same as allocating */
	{	data = bestalloc(vm, size, local);
		if(data && (type&VM_RSZERO) )
			memset((Void_t*)data, 0, size);
		return data;
	}
	if(size == 0) /* resizing to zero size is the same as freeing */
	{	(void)bestfree(vm, data, local);
		return NIL(Void_t*);
	}

	SETLOCK(vm, local);

	/**/ASSERT(KPVADDR(vm, data, bestaddr) == 0);
	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
	rp = BLOCK(data);	/**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
	oldz = SIZE(rp); CLRBITS(oldz);
	if(oldz < size)
	{	np = (Block_t*)((Vmuchar_t*)rp + oldz + sizeof(Head_t));
		do	/* forward merge as much as possible */
		{	s = SIZE(np); /**/ASSERT(!ISPFREE(s));
			if(np == vd->free)
			{	vd->free = NIL(Block_t*);
				CLRBITS(s);
			}
			else if(ISJUNK(s) )
			{	if(!bestreclaim(vd,np,(int)C_INDEX(s)) )
					/**/ASSERT(0); /* oops: did not see np! */
				s = SIZE(np); /**/ASSERT(s%ALIGN == 0);
			}
			else if(!ISBUSY(s) )
			{	if(np == vd->wild)
					vd->wild = NIL(Block_t*);
				else	REMOVE(vd,np,INDEX(s),t,bestsearch);
			}
			else	break;

			SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0);
			np = (Block_t*)((Vmuchar_t*)np + s);
			CLRPFREE(SIZE(np));
		} while(SIZE(rp) < size);

		if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) )
		{	reg Seg_t*	seg;

			s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr);
			seg = SEG(rp);
			if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s,
				      vm->disc) == seg->addr )
			{	SIZE(rp) += s;
				seg->extent += s;
				seg->size += s;
				seg->baddr += s;
				s  = (SIZE(rp)&~BITS) + sizeof(Head_t);
				np = (Block_t*)((Vmuchar_t*)rp + s);
				SEG(np) = seg;
				SIZE(np) = BUSY;
			}
		}
	}

	if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) )
	{	SIZE(rp) = size;
		np = NEXT(rp);
		SEG(np) = SEG(rp);
		SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK;
		CPYBITS(SIZE(rp),s);
		rp = np;
		goto do_free;
	}
	else if((bs = s&~BITS) < size)
	{	if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
			data = NIL(Void_t*); /* old data is not moveable */
		else
		{	oldd = data;
			if((data = KPVALLOC(vm,size,bestalloc)) )
			{	if(type&VM_RSCOPY)
					memcpy(data, oldd, bs);

			do_free: /* reclaim these right away */
				SETJUNK(SIZE(rp));
				LINK(rp) = CACHE(vd)[S_CACHE];
				CACHE(vd)[S_CACHE] = rp;
				bestreclaim(vd, NIL(Block_t*), S_CACHE);
			}
		}
	}

	if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldz )
		memset((Void_t*)((Vmuchar_t*)data + oldz), 0, size-oldz);

	if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
		(*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0);

	CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	return data;
}

#if __STD_C
static long bestsize(Vmalloc_t* vm, Void_t* addr, int local )
#else
static long bestsize(vm, addr, local)
Vmalloc_t*	vm;	/* region allocating from	*/
Void_t*		addr;	/* address to check		*/
int		local;
#endif
{
	Seg_t		*seg;
	Block_t		*b, *endb;
	long		size;
	Vmdata_t	*vd = vm->data;

	SETLOCK(vm, local);

	size = -1L;
	for(seg = vd->seg; seg; seg = seg->next)
	{	b = SEGBLOCK(seg);
		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
		if((Vmuchar_t*)addr <= (Vmuchar_t*)b ||
		   (Vmuchar_t*)addr >= (Vmuchar_t*)endb)
			continue;
		while(b < endb)
		{	if(addr == DATA(b))
			{	if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) )
					size = -1L;
				else	size = (long)SIZE(b)&~BITS;
				goto done;
			}
			else if((Vmuchar_t*)addr <= (Vmuchar_t*)b)
				break;

			b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
		}
	}

done:
	CLRLOCK(vm, local);
	return size;
}

#if __STD_C
static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align, int local)
#else
static Void_t* bestalign(vm, size, align, local)
Vmalloc_t*	vm;
size_t		size;
size_t		align;
int		local;
#endif
{
	Vmuchar_t	*data;
	Block_t		*tp, *np;
	Seg_t		*seg;
	size_t		s, extra;
	size_t		orgsize = size, orgalign = align;
	Vmdata_t	*vd = vm->data;

	if(size <= 0 || align <= 0)
		return NIL(Void_t*);

	SETLOCK(vm, local);

	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
	align = MULTIPLE(align,ALIGN);

	/* hack so that dbalign() can store header data */
	if(VMETHOD(vd) != VM_MTDEBUG)
		extra = 0;
	else
	{	extra = DB_HEAD;
		while(align < extra || (align - extra) < sizeof(Block_t))
			align *= 2;
	}

	/* reclaim all free blocks now to avoid fragmentation */
	bestreclaim(vd,NIL(Block_t*),0);

	s = size + 2*(align+sizeof(Head_t)+extra);
	if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) )
		goto done;

	tp = BLOCK(data);
	seg = SEG(tp);

	/* get an aligned address that we can live with */
	if((s = (size_t)((VLONG(data)+extra)%align)) != 0)
		data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0);
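	/* Worked example with assumed numbers (the real sizes are
	** platform-dependent): if the raw data address were 0x1010, align 64
	** and extra 0, then 0x1010 % 64 == 16, so data is advanced by
	** 64-16 == 48 bytes to 0x1040, which is 64-byte aligned. The
	** over-allocation of 2*(align+sizeof(Head_t)+extra) above guarantees
	** room for this shift and for carving a block header in front of the
	** aligned address.
	*/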

	if((np = BLOCK(data)) != tp ) /* need to free left part */
	{	if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) )
		{	data += align;
			np = BLOCK(data);
		} /**/ASSERT(((VLONG(data)+extra)%align) == 0);

		s  = (Vmuchar_t*)np - (Vmuchar_t*)tp;
		SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY;
		SEG(np) = seg;

		SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK;
		/**/ ASSERT(SIZE(tp) >= sizeof(Body_t) );
		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
	}

	/* free left-over if too big */
	if((s = SIZE(np) - size) >= sizeof(Block_t))
	{	SIZE(np) = size;

		tp = NEXT(np);
		SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK;
		SEG(tp) = seg;
		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;

		SIZE(np) |= s&BITS;
	}

	bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */

	if(!local && _Vmtrace && (vd->mode&VM_TRACE) )
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);

done:
	CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);

	return (Void_t*)data;
}

/* The code below implements the discipline Vmdcsbrk and the heap region Vmheap.
** There are 5 alternative ways to get raw memory:
**	win32, sbrk, mmap_anon, mmap_zero, and reusing the native malloc.
** The method is selected here so that our malloc implementation can work
** with concurrent threads. The sbrk/brk interface is unfortunately not
** atomic, so we prefer mmap_anon or mmap_zero when they are available.
*/
#if _mem_win32
#undef	_mem_mmap_anon
#undef	_mem_mmap_zero
#undef	_mem_sbrk
#endif
#if _mem_mmap_anon
#undef	_mem_mmap_zero
#if !_PACKAGE_ast
#undef	_mem_sbrk
#endif
#endif
#if _mem_mmap_zero
#if !_PACKAGE_ast
#undef	_mem_sbrk
#endif
#endif

#if __linux__

/* make sure that allocated memory is addressable */

#include	<signal.h>
typedef void	(*Sig_f)(int);
static int	Gotsegv = 0;

static void sigsegv(int sig)
{
	if(sig == SIGSEGV)
		Gotsegv = 1;
}
static int chkaddr(Vmuchar_t* addr, size_t nsize)
{
	Sig_f	segv;
	int	rv;

	Gotsegv = 0; /* catch segmentation faults */
	segv = signal(SIGSEGV, sigsegv);

	rv = *(addr+nsize-1);
	rv = Gotsegv ? -1 : rv;

	signal(SIGSEGV, segv); /* restore signal catcher */
	Gotsegv = 0;

	return rv;
}
#else

/* on the non-Linux systems we know of, brk addresses are guaranteed to be valid */

#define	chkaddr(a,n)	(0)

#endif /*__linux__*/

#if _mem_win32 /* getting memory on a Windows system */
#if _PACKAGE_ast
#include	<ast_windows.h>
#else
#include	<windows.h>
#endif

static Void_t* win32mem(Void_t* caddr, size_t csize, size_t nsize)
{	/**/ ASSERT(csize > 0 || nsize > 0)
	if(csize == 0)
	{	caddr = (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE);
		return caddr;
	}
	else if(nsize == 0)
	{	(void)VirtualFree((LPVOID)caddr,0,MEM_RELEASE);
		return caddr;
	}
	else	return NIL(Void_t*);
}
#endif /* _mem_win32 */

#if _mem_sbrk /* getting space via brk/sbrk - not concurrent-ready */
static Void_t* sbrkmem(Void_t* caddr, size_t csize, size_t nsize)
{
	Vmuchar_t	*addr = (Vmuchar_t*)sbrk(0);

	if(!addr || addr == (Vmuchar_t*)(-1) )
		return NIL(Void_t*);

	if(csize > 0 && addr != (Vmuchar_t*)caddr+csize)
		return NIL(Void_t*);
	else if(csize == 0)
		caddr = addr;

	/**/ASSERT(addr == (Vmuchar_t*)caddr+csize);
	if(nsize < csize)
		addr -= csize-nsize;
	else if((addr += nsize-csize) < (Vmuchar_t*)caddr )
		return NIL(Void_t*);

	if(brk(addr) != 0 )
		return NIL(Void_t*);
	else if(nsize > csize && chkaddr(caddr, nsize) < 0 )
	{	(void)brk((Vmuchar_t*)caddr+csize);
		return NIL(Void_t*);
	}
	else	return caddr;
}
#endif /* _mem_sbrk */

#if _mem_mmap_anon || _mem_mmap_zero /* get space using mmap */
#include		<fcntl.h>
#include		<sys/mman.h>

#ifndef MAP_ANON
#ifdef MAP_ANONYMOUS
#define	MAP_ANON	MAP_ANONYMOUS
#else
#define MAP_ANON	0
#endif
#endif /*MAP_ANON*/

#ifndef OPEN_MAX
#define	OPEN_MAX	64
#endif
#define FD_INIT		(-1)		/* uninitialized file desc	*/
#define FD_NONE		(-2)		/* no mapping with file desc	*/

typedef struct _mmdisc_s
{	Vmdisc_t	disc;
	int		fd;
	off_t		offset;
} Mmdisc_t;

static Void_t* mmapmem(Void_t* caddr, size_t csize, size_t nsize, Mmdisc_t* mmdc)
{
#if _mem_mmap_zero
	if(mmdc) /* /dev/zero mapping */
	{	if(mmdc->fd == FD_INIT ) /* open /dev/zero for mapping */
		{	int	fd;
			if((fd = open("/dev/zero", O_RDONLY)) < 0 )
			{	mmdc->fd = FD_NONE;
				return NIL(Void_t*);
			}
			mmdc->fd = _vmfd(fd);
		}

		if(mmdc->fd == FD_NONE)
			return NIL(Void_t*);
	}
#endif /* _mem_mmap_zero */

	/**/ASSERT(csize > 0 || nsize > 0);
	if(csize == 0)
	{	nsize = ROUND(nsize, _Vmpagesize);
		caddr = NIL(Void_t*);
#if _mem_mmap_zero
		if(mmdc && mmdc->fd >= 0 )
			caddr = mmap(0, nsize, PROT_READ|PROT_WRITE, MAP_PRIVATE, mmdc->fd, mmdc->offset);
#endif
#if _mem_mmap_anon
		if(!mmdc )
			caddr = mmap(0, nsize, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
#endif
		if(!caddr || caddr == (Void_t*)(-1))
			return NIL(Void_t*);
		else if(chkaddr((Vmuchar_t*)caddr, nsize) < 0 )
		{	(void)munmap(caddr, nsize);
			return NIL(Void_t*);
		}
		else
		{	if(mmdc)
				mmdc->offset += nsize;
			return caddr;
		}
	}
	else if(nsize == 0)
	{	Vmuchar_t	*addr = (Vmuchar_t*)sbrk(0);
		if(addr < (Vmuchar_t*)caddr ) /* in sbrk space */
			return NIL(Void_t*);
		else
		{	(void)munmap(caddr, csize);
			return caddr;
		}
	}
	else	return NIL(Void_t*);
}
#endif /* _mem_mmap_anon || _mem_mmap_zero */

#if _std_malloc /* using the native malloc as a last resort */
static Void_t* mallocmem(Void_t* caddr, size_t csize, size_t nsize)
{
	/**/ASSERT(csize > 0 || nsize > 0);
	if(csize == 0)
		return (Void_t*)malloc(nsize);
	else if(nsize == 0)
	{	free(caddr);
		return caddr;
	}
	else	return NIL(Void_t*);
}
#endif

/* A discipline to get raw memory using VirtualAlloc/mmap/sbrk */
static Void_t* getmemory(Vmalloc_t* vm, Void_t* caddr, size_t csize, size_t nsize, Vmdisc_t* disc)
{
	Vmuchar_t	*addr;

	if((csize > 0 && !caddr) || (csize == 0 && nsize == 0) )
		return NIL(Void_t*);

#if _mem_win32
	if((addr = win32mem(caddr, csize, nsize)) )
		return (Void_t*)addr;
#endif
#if _mem_sbrk
#if 1 /* no sbrk() unless explicit VM_break */
	if((_Vmassert & VM_break) && (addr = sbrkmem(caddr, csize, nsize)) )
#else /* asoinit(0,0,0)==0 => the application did not request a specific aso method => sbrk() ok */
	if(((_Vmassert & VM_break) || !(_Vmassert & VM_mmap) && !asoinit(0, 0, 0)) && (addr = sbrkmem(caddr, csize, nsize)) )
#endif
		return (Void_t*)addr;
#endif
#if _mem_mmap_anon
	if((addr = mmapmem(caddr, csize, nsize, (Mmdisc_t*)0)) )
		return (Void_t*)addr;
#endif
#if _mem_mmap_zero
	if((addr = mmapmem(caddr, csize, nsize, (Mmdisc_t*)disc)) )
		return (Void_t*)addr;
#endif
#if _mem_sbrk
	if(!(_Vmassert & VM_break) && (addr = sbrkmem(caddr, csize, nsize)) )
		return (Void_t*)addr;
#endif
#if _std_malloc
	if((addr = mallocmem(caddr, csize, nsize)) )
		return (Void_t*)addr;
#endif
	return NIL(Void_t*);
}

#if _mem_mmap_zero || _mem_mmap_anon
static Mmdisc_t _Vmdcsystem = { { getmemory, NIL(Vmexcept_f), 64*1024, sizeof(Mmdisc_t) }, FD_INIT, 0 };
#else
static Vmdisc_t _Vmdcsystem = { getmemory, NIL(Vmexcept_f), 0, sizeof(Vmdisc_t) };
#endif

static Vmethod_t _Vmbest =
{
	bestalloc,
	bestresize,
	bestfree,
	bestaddr,
	bestsize,
	bestcompact,
	bestalign,
	VM_MTBEST
};

/* The heap region */
static Vmdata_t	_Vmdata =
{
	0,				/* lock		*/
	VM_MTBEST|VM_SHARE,		/* mode		*/
	0,				/* incr		*/
	0,				/* pool		*/
	NIL(Seg_t*),			/* seg		*/
	NIL(Block_t*),			/* free		*/
	NIL(Block_t*),			/* wild		*/
	NIL(Block_t*)			/* root		*/
					/* tiny[]	*/
					/* cache[]	*/
};
Vmalloc_t _Vmheap =
{
	{ bestalloc,
	  bestresize,
	  bestfree,
	  bestaddr,
	  bestsize,
	  bestcompact,
	  bestalign,
	  VM_MTBEST
	},
	NIL(char*),			/* file		*/
	0,				/* line		*/
	0,				/* func		*/
	(Vmdisc_t*)(&_Vmdcsystem),	/* disc		*/
	&_Vmdata,			/* data		*/
	NIL(Vmalloc_t*)			/* next		*/
};

__DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap);
__DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap);
__DEFINE__(Vmethod_t*, Vmbest, &_Vmbest);
__DEFINE__(Vmdisc_t*,  Vmdcsystem, (Vmdisc_t*)(&_Vmdcsystem) );
__DEFINE__(Vmdisc_t*,  Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsystem) );
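
/* A minimal usage sketch (not part of this file): clients normally reach this
** method through the exported Vmbest/Vmdcsystem handles defined above, e.g.
**
**	Vmalloc_t* vm = vmopen(Vmdcsystem, Vmbest, 0);
**	void* p = vmalloc(vm, 100);
**	...
**	vmfree(vm, p);
**	vmclose(vm);
**
** Vmheap itself is the predefined best-fit region that the malloc-compatible
** entry points in this package typically draw from.
*/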

#ifdef NoF
NoF(vmbest)
#endif

#endif