/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2010 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#ifndef _VMHDR_H
#define _VMHDR_H	1
#ifndef _BLD_vmalloc
#define _BLD_vmalloc	1
#endif

/*	Common types, and macros for vmalloc functions.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/

#ifndef __STD_C	/* this is normally in vmalloc.h but it's included late here */
#ifdef __STDC__
#define	__STD_C		1
#else
#if __cplusplus || c_plusplus
#define __STD_C		1
#else
#define __STD_C		0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#if _PACKAGE_ast

#if !_UWIN
#define getpagesize		______getpagesize
#define _npt_getpagesize	1
#define brk			______brk
#define sbrk			______sbrk
#define _npt_sbrk		1
#endif

#include	<ast.h>

#if _npt_getpagesize
#undef				getpagesize
#endif
#if _npt_sbrk
#undef				brk
#undef				sbrk
#endif

#else

#include	<ast_common.h>

#if !_UWIN
#define _npt_getpagesize	1
#define _npt_sbrk		1
#endif

#ifndef integralof
#define integralof(x)		(((char*)(x))-((char*)0))
#endif

#endif /*_PACKAGE_ast*/

#include	"FEATURE/vmalloc"

#include	<setjmp.h>

/* the macros below decide which combination of sbrk() and mmap() to use */
#if defined(_WIN32)
#define _mem_win32	1
#undef _mem_sbrk
#undef _mem_mmap_anon
#undef _mem_mmap_zero
#endif

#if _mem_mmap_anon
#undef _mem_mmap_zero
#endif

#if !_mem_win32 && !_mem_sbrk && !_mem_mmap_anon && !_mem_mmap_zero
#undef _std_malloc
#define _std_malloc	1	/* do not define malloc/free/realloc */
#endif

typedef unsigned char	Vmuchar_t;
typedef unsigned long	Vmulong_t;

typedef union _head_u	Head_t;
typedef union _body_u	Body_t;
typedef struct _block_s	Block_t;
typedef struct _seg_s	Seg_t;
typedef struct _pfobj_s	Pfobj_t;

#if !_typ_ssize_t
typedef int		ssize_t;
#endif

#define NIL(t)		((t)0)
#define reg		register
#if __STD_C
#define NOTUSED(x)	(void)(x)
#else
#define NOTUSED(x)	(&x,1)
#endif

/* convert an address to an integral value */
#define VLONG(addr)	((Vmulong_t)((char*)(addr) - (char*)0) )

/* Round x up to a multiple of y. ROUND2 does powers-of-2 and ROUNDX does others */
#define ROUND2(x,y)	(((x) + ((y)-1)) & ~((y)-1))
#define ROUNDX(x,y)	((((x) + ((y)-1)) / (y)) * (y))
#define ROUND(x,y)	(((y)&((y)-1)) ? ROUNDX((x),(y)) : ROUND2((x),(y)) )

/* compute a value that is a common multiple of x and y */
#define MULTIPLE(x,y)	((x)%(y) == 0 ? (x) : (y)%(x) == 0 ? (y) : (y)*(x))
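
/* Illustrative sketch (added commentary, not part of the original header):
** a few worked values for the rounding macros above.
**	ROUND2(37,8)   == 40	y is a power of 2, so masking is used
**	ROUNDX(37,24)  == 48	y is not a power of 2, so division is used
**	ROUND(37,8)    == 40	(8 & 7) == 0, so ROUND dispatches to ROUND2
**	MULTIPLE(4,8)  == 8	8 is already a multiple of 4
**	MULTIPLE(8,12) == 96	neither divides the other, so x*y is used,
**				a common multiple but not necessarily the least
*/
#if 0	/* compile-out sanity checks for the values above */
static int	_vm_round_check[ROUND2(37,8) == 40 && ROUNDX(37,24) == 48 &&
				MULTIPLE(8,12) == 96 ? 1 : -1];
#endif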

#define VM_check	0x0001	/* enable detailed checks		*/
#define VM_abort	0x0002	/* abort() on assertion failure		*/
#define VM_region	0x0004	/* enable region segment checks		*/
#define VM_mmap		0x0010	/* favor mmap allocation		*/

#if _UWIN
#include <ast_windows.h>
#endif

#ifndef DEBUG
#ifdef _BLD_DEBUG
#define DEBUG		1
#endif /*_BLD_DEBUG*/
#endif /*DEBUG*/
#if DEBUG
extern void		_vmmessage _ARG_((const char*, long, const char*, long));
#define ABORT()		(_Vmassert & VM_abort)
#define CHECK()		(_Vmassert & VM_check)
#define ASSERT(p)	((p) ? 0 : (MESSAGE("Assertion failed"), ABORT() ? (abort(),0) : 0))
#define COUNT(n)	((n) += 1)
#define MESSAGE(s)	_vmmessage(__FILE__,__LINE__,s,0)
#else
#define ABORT()		(0)
#define ASSERT(p)
#define CHECK()		(0)
#define COUNT(n)
#define MESSAGE(s)	(0)
#endif /*DEBUG*/

#define VMPAGESIZE	8192

#if _AST_PAGESIZE > VMPAGESIZE
#undef	VMPAGESIZE
#define VMPAGESIZE	_AST_PAGESIZE
#endif

#if _lib_getpagesize && !defined(_AST_PAGESIZE)
#define GETPAGESIZE(x)	((x) ? (x) : \
			 (((x)=getpagesize()) < VMPAGESIZE ? ((x)=VMPAGESIZE) : (x)) )
#else
#define GETPAGESIZE(x)	((x) = VMPAGESIZE)
#endif
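
/* Illustrative sketch (an assumption about intended usage, not code from the
** original source): the argument of GETPAGESIZE() doubles as a cache, so a
** caller passes an integral lvalue that is filled in on first use only.
** The vm_pagesize slot of Vmextern_t further below serves this purpose for
** the vmalloc code itself.
*/
#if 0
static size_t example_pagesize(void)
{
	static size_t	pgsz = 0;	/* 0 means "not yet determined"	  */
	GETPAGESIZE(pgsz);		/* queries the system once only	  */
	return pgsz;			/* never smaller than VMPAGESIZE  */
}
#endif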

#ifdef	_AST_PAGESIZE
#define VMHEAPINCR	(_Vmpagesize*1)
#else
#define VMHEAPINCR	(_Vmpagesize*4)
#endif

/* Blocks are allocated such that their sizes are 0%(BITS+1), i.e.,
** always a multiple of BITS+1. This frees up enough low-order bits
** in the size field to store state information.
*/
#define BUSY		(01)	/* block is busy				*/
#define PFREE		(02)	/* preceding block is free			*/
#define JUNK		(04)	/* marked as freed but not yet processed	*/
#define BITS		(07)	/* (BUSY|PFREE|JUNK)				*/
#define ALIGNB		(8)	/* size must be a multiple of BITS+1		*/

#define ISBITS(w)	((w) & BITS)
#define CLRBITS(w)	((w) &= ~BITS)
#define CPYBITS(w,f)	((w) |= ((f)&BITS) )

#define ISBUSY(w)	((w) & BUSY)
#define SETBUSY(w)	((w) |= BUSY)
#define CLRBUSY(w)	((w) &= ~BUSY)

#define ISPFREE(w)	((w) & PFREE)
#define SETPFREE(w)	((w) |= PFREE)
#define CLRPFREE(w)	((w) &= ~PFREE)

#define ISJUNK(w)	((w) & JUNK)
#define SETJUNK(w)	((w) |= JUNK)
#define CLRJUNK(w)	((w) &= ~JUNK)
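
/* Illustrative sketch (added commentary): because block sizes are always
** multiples of ALIGNB == BITS+1 == 8, the three low-order bits of a size
** word are free to carry the BUSY/PFREE/JUNK state.  For example, a busy
** 48-byte block whose preceding block is free has size word 48|BUSY|PFREE
** (i.e. 51); ISBITS() extracts the state and CLRBITS() recovers the size.
*/
#if 0
static void example_sizeword(void)
{
	Vmulong_t	w = 48;	/* block size, a multiple of ALIGNB	*/
	SETBUSY(w);		/* mark the block busy			*/
	SETPFREE(w);		/* mark the preceding block free	*/
	/* here ISBITS(w) == (BUSY|PFREE) and (w & ~BITS) == 48 */
	CLRBITS(w);		/* back to the plain size, w == 48	*/
}
#endif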

#define OFFSET(t,e)	((size_t)(&(((t*)0)->e)) )

/* these bits share the "mode" field with the public bits */
#define VM_AGAIN	0010000		/* re-search the arena for space */
#define VM_LOCK		0020000		/* region is locked		*/
#define VM_LOCAL	0040000		/* local call, bypass lock	*/
#define VM_INUSE	0004000		/* some operation is running	*/
#define VM_UNUSED	0100060
#define VMETHOD(vd)	((vd)->mode&VM_METHODS)

/* test/set/clear lock state */
#define SETINUSE(vd,iu)	(((iu) = (vd)->mode&VM_INUSE), ((vd)->mode |= VM_INUSE) )
#define CLRINUSE(vd,iu)	((iu) ? 0 : ((vd)->mode &= ~VM_INUSE) )
#define SETLOCAL(vd)	((vd)->mode |= VM_LOCAL)
#define GETLOCAL(vd,l)	(((l) = (vd)->mode&VM_LOCAL), ((vd)->mode &= ~VM_LOCAL) )
#define ISLOCK(vd,l)	((l) ? 0 : ((vd)->mode &  VM_LOCK) )
#define SETLOCK(vd,l)	((l) ? 0 : ((vd)->mode |= VM_LOCK) )
#define CLRLOCK(vd,l)	((l) ? 0 : ((vd)->mode &= ~VM_LOCK) )
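
/* Illustrative sketch (an assumption about how the macros above combine, not
** the actual entry code of any vmalloc method): the typical protocol of a
** public operation.  "local" and "inuse" are scratch ints; Vmdata_t is
** declared in vmalloc.h, which is included further below.
*/
#if 0
static int example_lock_protocol(Vmdata_t* vd)
{
	int	local, inuse;

	SETINUSE(vd, inuse);	/* remember the prior in-use state	*/
	GETLOCAL(vd, local);	/* was this a local (KPV*) call?	*/
	if(ISLOCK(vd, local))	/* only non-local calls honor the lock	*/
	{	CLRINUSE(vd, inuse);
		return -1;
	}
	SETLOCK(vd, local);	/* grab the lock unless the call is local */

	/* ... the real work of the operation goes here ... */

	CLRLOCK(vd, local);	/* release unless the call was local	*/
	CLRINUSE(vd, inuse);	/* restore the prior in-use state	*/
	return 0;
}
#endif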

/* announcing entry/exit of allocation calls */
#define ANNOUNCE(lc, vm,ev,dt,dc) \
		(( ((lc)&VM_LOCAL) || !(dc) || !(dc)->exceptf ) ? 0 : \
			(*(dc)->exceptf)((vm), (ev), (Void_t*)(dt), (dc)) )


/* local calls */
#define KPVALLOC(vm,sz,func)		(SETLOCAL((vm)->data), func((vm),(sz)) )
#define KPVALIGN(vm,sz,al,func)		(SETLOCAL((vm)->data), func((vm),(sz),(al)) )
#define KPVFREE(vm,d,func)		(SETLOCAL((vm)->data), func((vm),(d)) )
#define KPVRESIZE(vm,d,sz,mv,func)	(SETLOCAL((vm)->data), func((vm),(d),(sz),(mv)) )
#define KPVADDR(vm,addr,func)		(SETLOCAL((vm)->data), func((vm),(addr)) )
#define KPVCOMPACT(vm,func)		(SETLOCAL((vm)->data), func((vm)) )
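
/* Illustrative sketch (hypothetical: bestalloc() is a stand-in for some
** method's allocation function, not a name defined in this header): how the
** KPV* wrappers are used.  SETLOCAL() marks the region so that the callee's
** lock handling, via GETLOCAL()/ISLOCK() above, treats the call as internal.
*/
#if 0
static Void_t* example_local_alloc(Vmalloc_t* vm, size_t size)
{
	return KPVALLOC(vm, size, bestalloc);
}
#endif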

/* ALIGN is chosen so that a block can store all primitive types.
** It should also be a multiple of ALIGNB==(BITS+1) so the size field
** of Block_t will always be 0%(BITS+1) as noted above.
** Of paramount importance is the ALIGNA macro below. If the local compile
** environment is strange enough that the below method does not calculate
** ALIGNA right, then the code below should be commented out and ALIGNA
** redefined to the appropriate requirement.
*/
union _align_u
{	char		c, *cp;
	int		i, *ip;
	long		l, *lp;
	double		d, *dp, ***dppp[8];
	size_t		s, *sp;
	void(*		fn)();
	union _align_u*	align;
	Head_t*		head;
	Body_t*		body;
	Block_t*	block;
	Vmuchar_t	a[ALIGNB];
	_ast_fltmax_t	ld, *ldp;
	jmp_buf		jmp;
};
struct _a_s
{	char		c;
	union _align_u	a;
};
#define ALIGNA	(sizeof(struct _a_s) - sizeof(union _align_u))
struct _align_s
{	char	data[MULTIPLE(ALIGNA,ALIGNB)];
};
#undef	ALIGN	/* bsd sys/param.h defines this */
#define ALIGN	sizeof(struct _align_s)
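
/* Illustrative sketch (added commentary): on a typical LP64 compiler where
** union _align_u is 16-byte aligned, the char c in struct _a_s is followed
** by 15 bytes of padding, so
**	ALIGNA == sizeof(struct _a_s) - sizeof(union _align_u) == 16
** and ALIGN == MULTIPLE(16,ALIGNB) == 16.  The exact value is whatever the
** local compiler's padding implies; the machinery only needs ALIGN to be a
** common multiple of ALIGNA and ALIGNB.
*/
#if 0	/* compile-out check of that requirement */
static int	_vm_align_check[ALIGN % ALIGNB == 0 ? 1 : -1];
#endif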

/* make sure that the head of a block is a multiple of ALIGN */
struct _head_s
{	union
	{ Seg_t*	seg;	/* the containing segment	*/
	  Block_t*	link;	/* possible link list usage	*/
	  Pfobj_t*	pf;	/* profile structure pointer	*/
	  char*		file;	/* for file name in Vmdebug	*/
	} seg;
	union
	{ size_t	size;	/* size of data area in bytes	*/
	  Block_t*	link;	/* possible link list usage	*/
	  int		line;	/* for line number in Vmdebug	*/
	} size;
};
#define HEADSIZE	ROUND(sizeof(struct _head_s),ALIGN)
union _head_u
{	Vmuchar_t	data[HEADSIZE];	/* to standardize size		*/
	struct _head_s	head;
};

/* now make sure that the body of a block is a multiple of ALIGN */
struct _body_s
{	Block_t*	link;	/* next in link list		*/
	Block_t*	left;	/* left child in free tree	*/
	Block_t*	right;	/* right child in free tree	*/
	Block_t**	self;	/* self pointer when free	*/
};
#define BODYSIZE	ROUND(sizeof(struct _body_s),ALIGN)
union _body_u
{	Vmuchar_t	data[BODYSIZE];	/* to standardize size		*/
	struct _body_s	body;
};

/* After all the songs and dances, we should now have:
**	sizeof(Head_t)%ALIGN == 0
**	sizeof(Body_t)%ALIGN == 0
** and	sizeof(Block_t) = sizeof(Head_t)+sizeof(Body_t)
*/
struct _block_s
{	Head_t	head;
	Body_t	body;
};
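
/* Illustrative sketch (added commentary): the three invariants above can be
** spelled out as compile-out checks, using the old negative-array-size trick
** in place of a static assertion.
*/
#if 0
static int	_vm_head_check [sizeof(Head_t)  % ALIGN == 0 ? 1 : -1];
static int	_vm_body_check [sizeof(Body_t)  % ALIGN == 0 ? 1 : -1];
static int	_vm_block_check[sizeof(Block_t) ==
				sizeof(Head_t) + sizeof(Body_t) ? 1 : -1];
#endif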

/* requirements for smallest block type */
struct _tiny_s
{	Block_t*	link;
	Block_t*	self;
};
#define TINYSIZE	ROUND(sizeof(struct _tiny_s),ALIGN)
#define S_TINY		1				/* # of tiny blocks	*/
#define MAXTINY		(S_TINY*ALIGN + TINYSIZE)
#define TLEFT(b)	((b)->head.head.seg.link)	/* instead of LEFT	*/
#define TINIEST(b)	(SIZE(b) == TINYSIZE)		/* this type uses TLEFT	*/

#define DIV(x,y)	((y) == 8 ? ((x)>>3) : (x)/(y) )
#define INDEX(s)	DIV((s)-TINYSIZE,ALIGN)

/* small block types kept in separate caches for quick allocation */
#define S_CACHE		6	/* # of types of small blocks to be cached	*/
#define N_CACHE		32	/* on allocation, create this many at a time	*/
#define MAXCACHE	(S_CACHE*ALIGN + TINYSIZE)
#define C_INDEX(s)	(s < MAXCACHE ? INDEX(s) : S_CACHE)
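
/* Illustrative sketch (added commentary, assuming ALIGN == 16 as on a typical
** LP64 target): then TINYSIZE == 16, MAXTINY == 32 and MAXCACHE == 112, so
** INDEX(16) == 0, INDEX(48) == 2 and C_INDEX(200) == S_CACHE == 6.  That is,
** all blocks of MAXCACHE bytes or more share the overflow slot, which is why
** the cache[] array below has S_CACHE+1 entries.
*/
#if 0	/* compile-out check: the largest cachable index stays in range */
static int	_vm_cache_check[C_INDEX(MAXCACHE) == S_CACHE ? 1 : -1];
#endif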

#define TINY(vd)	((vd)->tiny)
#define CACHE(vd)	((vd)->cache)

struct _vmdata_s
{	int		mode;		/* current mode for region		*/
	size_t		incr;		/* allocate in multiple of this		*/
	size_t		pool;		/* size of an elt in a Vmpool region	*/
	Seg_t*		seg;		/* list of segments			*/
	Block_t*	free;		/* most recent free block		*/
	Block_t*	wild;		/* wilderness block			*/
	Block_t*	root;		/* root of free tree			*/
	Block_t*	tiny[S_TINY];	/* small blocks				*/
	Block_t*	cache[S_CACHE+1]; /* delayed free blocks		*/
};
/* Vmdata_t typedef in <vmalloc.h> */

#include	"vmalloc.h"

#if !_PACKAGE_ast
/* we don't use these here and they interfere with some local names */
#undef malloc
#undef free
#undef realloc
#endif

/* segment structure */
struct _seg_s
{	Vmdata_t*	vmdt;	/* the data region holding this	*/
	Seg_t*		next;	/* next segment			*/
	Void_t*		addr;	/* starting segment address	*/
	size_t		extent;	/* extent of segment		*/
	Vmuchar_t*	baddr;	/* bottom of usable memory	*/
	size_t		size;	/* allocable size		*/
	Block_t*	free;	/* recent free blocks		*/
	Block_t*	last;	/* Vmlast last-allocated block	*/
};

/* starting block of a segment */
#define SEGBLOCK(s)	((Block_t*)(((Vmuchar_t*)(s)) + ROUND(sizeof(Seg_t),ALIGN)))

/* short-hands for block data */
#define SEG(b)		((b)->head.head.seg.seg)
#define SEGLINK(b)	((b)->head.head.seg.link)
#define	SIZE(b)		((b)->head.head.size.size)
#define SIZELINK(b)	((b)->head.head.size.link)
#define LINK(b)		((b)->body.body.link)
#define LEFT(b)		((b)->body.body.left)
#define RIGHT(b)	((b)->body.body.right)
#define VM(b)		(SEG(b)->vmdt)

#define DATA(b)		((Void_t*)((b)->body.data) )
#define BLOCK(d)	((Block_t*)((char*)(d) - sizeof(Head_t)) )
#define SELF(b)		((Block_t**)((b)->body.data + SIZE(b) - sizeof(Block_t*)) )
#define LAST(b)		(*((Block_t**)(((char*)(b)) - sizeof(Block_t*)) ) )
#define NEXT(b)		((Block_t*)((b)->body.data + SIZE(b)) )
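
/* Illustrative sketch (added commentary): how the short-hands above relate
** for a block b whose size bits happen to be clear.  DATA(b) is the address
** handed to callers and BLOCK() is its inverse, since the data area starts
** right after the Head_t; NEXT(b) is the physically adjacent block, SIZE(b)
** bytes past the data area.  SELF(b) plants a pointer to b at the end of a
** free block's data so that LAST(), applied to the following block, can find
** the preceding free block.
*/
#if 0
static void example_shorthands(Block_t* b)
{	/* assume the low state bits of SIZE(b) are clear here */
	Void_t*	d = DATA(b);
	ASSERT(BLOCK(d) == b);
	ASSERT((Vmuchar_t*)NEXT(b) == (Vmuchar_t*)d + SIZE(b));
}
#endif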

/* functions to manipulate link lists of elts of the same size */
#define SETLINK(b)	(RIGHT(b) =  (b) )
#define ISLINK(b)	(RIGHT(b) == (b) )
#define UNLINK(vd,b,i,t) \
		((((t) = LINK(b)) ? (LEFT(t) = LEFT(b)) : NIL(Block_t*) ), \
		 (((t) = LEFT(b)) ? (LINK(t) = LINK(b)) : (TINY(vd)[i] = LINK(b)) ) )

/* delete a block from a link list or the free tree.
** The test in the below macro is worth scratching your head a bit.
** Even though tiny blocks (size < BODYSIZE) are kept in separate lists,
** only the TINIEST ones require TLEFT(b) for the back link. Since this
** destroys the SEG(b) pointer, it must be carefully restored in bestsearch().
** Other tiny blocks have enough space to use the usual LEFT(b).
** In this case, I have also carefully arranged so that RIGHT(b) and
** SELF(b) can be overlapped and the test ISLINK() will go through.
*/
#define REMOVE(vd,b,i,t,func) \
		((!TINIEST(b) && ISLINK(b)) ? UNLINK((vd),(b),(i),(t)) : \
	 		func((vd),SIZE(b),(b)) )
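
/* Illustrative sketch (added commentary): UNLINK() above is an ordinary
** doubly-linked-list deletion with t as scratch.  It first patches the back
** pointer of the successor (LEFT(LINK(b)) = LEFT(b)), then either patches
** the forward pointer of the predecessor or, when b headed the list, the
** TINY(vd)[i] anchor itself.  REMOVE() then chooses between that and a
** free-tree deletion: RIGHT(b) == b (ISLINK) marks a block sitting on a
** same-size list, while the TINIEST blocks and tree members are handed to
** the func() argument (e.g. the best-fit search routine).
*/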

/* see if a block is the wilderness block */
#define SEGWILD(b)	(((b)->body.data+SIZE(b)+sizeof(Head_t)) >= SEG(b)->baddr)
#define VMWILD(vd,b)	(((b)->body.data+SIZE(b)+sizeof(Head_t)) >= vd->seg->baddr)

#define VMFLF(vm,fi,ln,fn)	((fi) = (vm)->file, (vm)->file = NIL(char*), \
		 		 (ln) = (vm)->line, (vm)->line = 0 , \
		 		 (fn) = (vm)->func, (vm)->func = NIL(Void_t*) )

/* The lay-out of a Vmprofile block is this:
**	seg_ size ----data---- _pf_ size
**	_________ ____________ _________
**	seg_, size: header required by Vmbest.
**	data:	actual data block.
**	_pf_:	pointer to the corresponding Pfobj_t struct
**	size:	the true size of the block.
** So each block requires an extra Head_t.
*/
#define PF_EXTRA   sizeof(Head_t)
#define PFDATA(d)  ((Head_t*)((Vmuchar_t*)(d)+(SIZE(BLOCK(d))&~BITS)-sizeof(Head_t)) )
#define PFOBJ(d)   (PFDATA(d)->head.seg.pf)
#define PFSIZE(d)  (PFDATA(d)->head.size.size)
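
/* Illustrative sketch (added commentary): for a Vmprofile user pointer d,
** PFDATA(d) lands on the trailing Head_t of the block, i.e. at
**	(Vmuchar_t*)d + (SIZE(BLOCK(d)) & ~BITS) - sizeof(Head_t)
** so PFOBJ(d) and PFSIZE(d) are just the seg and size halves of that last
** header.  PF_EXTRA is the per-block overhead that keeps room for it.
*/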

/* The lay-out of a block allocated by Vmdebug is this:
**	seg_ size file size seg_ magi ----data---- --magi-- magi line
**	--------- --------- --------- ------------ -------- ---------
**	seg_,size: header required by Vmbest management.
**	file:	the file where it was created.
**	size:	the true byte count of the block
**	seg_:	should be the same as the previous seg_.
**		This allows the function vmregion() to work.
**	magi:	magic bytes to detect overwrites.
**	data:	the actual data block.
**	magi:	more magic bytes.
**	line:	the line number in the file where it was created.
** So for each allocated block, we'll need 3 extra Head_t.
*/

/* convenient macros for accessing the above fields */
#define DB_HEAD		(2*sizeof(Head_t))
#define DB_TAIL		(2*sizeof(Head_t))
#define DB_EXTRA	(DB_HEAD+DB_TAIL)
#define DBBLOCK(d)	((Block_t*)((Vmuchar_t*)(d) - 3*sizeof(Head_t)) )
#define DBBSIZE(d)	(SIZE(DBBLOCK(d)) & ~BITS)
#define DBSEG(d)	(((Head_t*)((Vmuchar_t*)(d) - sizeof(Head_t)))->head.seg.seg )
#define DBSIZE(d)	(((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.size.size )
#define DBFILE(d)	(((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.seg.file )
#define DBLN(d)		(((Head_t*)((Vmuchar_t*)DBBLOCK(d)+DBBSIZE(d)))->head.size.line )
#define DBLINE(d)	(DBLN(d) < 0 ? -DBLN(d) : DBLN(d))

/* forward/backward translation for addresses between Vmbest and Vmdebug */
#define DB2BEST(d)	((Vmuchar_t*)(d) - 2*sizeof(Head_t))
#define DB2DEBUG(b)	((Vmuchar_t*)(b) + 2*sizeof(Head_t))

/* set file and line number, note that DBLN > 0 so that DBISBAD will work  */
#define DBSETFL(d,f,l)	(DBFILE(d) = (f), DBLN(d) = (f) ? (l) : 1)

/* set and test the state of a block known to be corrupted */
#define DBSETBAD(d)	(DBLN(d) > 0 ? (DBLN(d) = -DBLN(d)) : -1)
#define DBISBAD(d)	(DBLN(d) <= 0)

#define DB_MAGIC	0255		/* 10101101	*/

/* compute the bounds of the magic areas */
#define DBHEAD(d,begp,endp) \
		(((begp) = (Vmuchar_t*)(&DBSEG(d)) + sizeof(Seg_t*)), ((endp) = (d)) )
#define DBTAIL(d,begp,endp) \
		(((begp) = (Vmuchar_t*)(d)+DBSIZE(d)), ((endp) = (Vmuchar_t*)(&DBLN(d))) )
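
/* Illustrative sketch (added commentary, assuming sizeof(Head_t) == 16 as on
** a typical LP64 target): for a Vmdebug user pointer d,
**	DBBLOCK(d) == (Block_t*)(d - 48)    the underlying Vmbest block
**	DBFILE(d), DBSIZE(d) live at d - 32 (seg and size halves of one header)
**	DBSEG(d)   lives at d - 16          duplicate seg pointer for vmregion()
**	DBLN(d)    sits in the last header, past the data and the tail magic
** DBHEAD() and DBTAIL() bound the two magic regions checked for overwrites,
** and DB2BEST()/DB2DEBUG() shift by DB_HEAD to translate between the Vmbest
** and Vmdebug views of the same block.
*/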

/* external symbols for internal use by vmalloc */
typedef Block_t*	(*Vmsearch_f)_ARG_((Vmdata_t*, size_t, Block_t*));
typedef struct _vmextern_
{	Block_t*	(*vm_extend)_ARG_((Vmalloc_t*, size_t, Vmsearch_f ));
	ssize_t		(*vm_truncate)_ARG_((Vmalloc_t*, Seg_t*, size_t, int));
	size_t		vm_pagesize;
	char*		(*vm_strcpy)_ARG_((char*, const char*, int));
	char*		(*vm_itoa)_ARG_((Vmulong_t, int));
	void		(*vm_trace)_ARG_((Vmalloc_t*,
					  Vmuchar_t*, Vmuchar_t*, size_t, size_t));
	void		(*vm_pfclose)_ARG_((Vmalloc_t*));
	int		vm_assert;
	int		vm_options;
} Vmextern_t;

#define _Vmextend	(_Vmextern.vm_extend)
#define _Vmtruncate	(_Vmextern.vm_truncate)
#define _Vmpagesize	(_Vmextern.vm_pagesize)
#define _Vmstrcpy	(_Vmextern.vm_strcpy)
#define _Vmitoa		(_Vmextern.vm_itoa)
#define _Vmtrace	(_Vmextern.vm_trace)
#define _Vmpfclose	(_Vmextern.vm_pfclose)
#define _Vmassert	(_Vmextern.vm_assert)
#define _Vmoptions	(_Vmextern.vm_options)

#define VMOPTIONS()	do { if (!_Vmoptions) { _vmoptions(); } } while (0)

extern void		_vmoptions _ARG_((void));
extern int		_vmbestcheck _ARG_((Vmdata_t*, Block_t*));

_BEGIN_EXTERNS_

extern Vmextern_t	_Vmextern;

#if _PACKAGE_ast

#if _npt_getpagesize
extern int		getpagesize _ARG_((void));
#endif
#if _npt_sbrk
extern int		brk _ARG_(( void* ));
extern Void_t*		sbrk _ARG_(( ssize_t ));
#endif

#else

#if _hdr_unistd
#include	<unistd.h>
#else
extern void		abort _ARG_(( void ));
extern ssize_t		write _ARG_(( int, const void*, size_t ));
extern int		getpagesize _ARG_((void));
extern Void_t*		sbrk _ARG_((ssize_t));
#endif

#if !__STDC__ && !_hdr_stdlib
extern size_t		strlen _ARG_(( const char* ));
extern char*		strcpy _ARG_(( char*, const char* ));
extern int		strcmp _ARG_(( const char*, const char* ));
extern int		atexit _ARG_(( void(*)(void) ));
extern char*		getenv _ARG_(( const char* ));
extern Void_t*		memcpy _ARG_(( Void_t*, const Void_t*, size_t ));
extern Void_t*		memset _ARG_(( Void_t*, int, size_t ));
#else
#include	<stdlib.h>
#include	<string.h>
#endif

/* for vmexit.c */
extern int		onexit _ARG_(( void(*)(void) ));
extern void		_exit _ARG_(( int ));
extern void		_cleanup _ARG_(( void ));

#endif /*_PACKAGE_ast*/

_END_EXTERNS_

#if _UWIN
#define abort()		(DebugBreak(),abort())
#endif

#endif /* _VMHDR_H */