/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL
#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
                 2005, 2006, 2007 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H	1

#ifdef _MALLOC_INTERNAL

#ifdef	HAVE_CONFIG_H
#include <config.h>
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef	PP
#define	PP(args)	args
#undef	__ptr_t
#define	__ptr_t		void *
#else /* Not C++ or ANSI C.  */
#undef	PP
#define	PP(args)	()
#undef	__ptr_t
#define	__ptr_t		char *
#endif /* C++ or ANSI C.  */

#if	defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define	memset(s, zero, n)	bzero ((s), (n))
#endif
#ifndef memcpy
#define	memcpy(d, s, n)		bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define	CHAR_BIT	8
#endif

#ifdef	HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif	/* _MALLOC_INTERNAL.  */

#ifdef	__cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define	__malloc_size_t		size_t
#define	__malloc_ptrdiff_t	ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define	__malloc_size_t		__SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define	__malloc_size_t		unsigned int
#endif
#define	__malloc_ptrdiff_t	int
#endif

#ifndef	NULL
#define	NULL	0
#endif

#ifndef FREE_RETURN_TYPE
#define FREE_RETURN_TYPE void
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   at __PTR, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));

/* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
			     __malloc_size_t __size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif


#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof(int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
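/* For example, on a host with 32-bit int, INT_BIT is 32, so BLOCKLOG is
   12 and BLOCKSIZE is 4096; BLOCKIFY rounds a byte count up to whole
   blocks, e.g. BLOCKIFY (1) == 1 and BLOCKIFY (4097) == 2.  */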

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP		(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
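/* The two macros are inverses of each other.  Block numbers are 1-based:
   BLOCK (_heapbase) == 1, and with BLOCKSIZE 4096 the address
   _heapbase + 8192 lies in block 3, while ADDRESS (3) maps back to
   _heapbase + 8192.  */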

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;
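/* For example, if memalign (512, N) pads a malloc result at 0x100010 up
   to 0x100200, the list records exact == 0x100010 and aligned == 0x100200,
   so that free (0x100200) can recover the pointer malloc returned.
   (Addresses here are illustrative only.)  */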

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex;
#define LOCK()     pthread_mutex_lock (&_malloc_mutex)
#define UNLOCK()   pthread_mutex_unlock (&_malloc_mutex)
#else
#define LOCK()
#define UNLOCK()
#endif

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				       __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
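/* A debugging build might use these as follows (sketch only):

       mcheck (NULL);          before the first malloc
       char *p = malloc (32);
       p[32] = 0;              clobber one byte past the end
       mprobe (p);             would report MCHECK_TAIL

   Note that mcheck's per-block checks otherwise run only when a block
   is freed or reallocated.  */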

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap. */
    __malloc_size_t chunks_used; /* Chunks allocated by the user. */
    __malloc_size_t bytes_used;	/* Byte total of user-allocated chunks. */
    __malloc_size_t chunks_free; /* Chunks in the free list. */
    __malloc_size_t bytes_free;	/* Byte total of chunks in the free list. */
  };

/* Pick up the current statistics. */
extern struct mstats mstats PP ((void));
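/* Sketch of use: a caller could sample the counters around a phase of the
   program to estimate its net heap growth:

       struct mstats before = mstats ();
       ...
       struct mstats after = mstats ();
       growth = after.bytes_used - before.bytes_used;  */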

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
				 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
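/* The relocating allocator owns the handle, not the pointer: callers must
   fetch the address through *HANDLEPTR on each use, because a later
   r_alloc or r_re_alloc may slide the block and rewrite the handle.
   Sketch:

       __ptr_t buf;
       r_alloc (&buf, 1024);      use the block only via buf
       r_re_alloc (&buf, 4096);   buf may now point somewhere else
       r_alloc_free (&buf);  */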


#ifdef	__cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef	_MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;
/* Nonzero once the allocator has set itself up.  ("Are you experienced?")  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t)size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      /* Consume the padding bytes too; since __morecore returns
	 contiguous core, RESULT can then be advanced to the next
	 block boundary.  */
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
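/* Worked example: with BLOCKSIZE 4096 (0x1000), if __morecore hands back
   core starting at 0x2100, then adj is 0x100, so 0xf00 further bytes are
   requested and the result is bumped to 0x3000, the next block boundary.
   (Addresses illustrative.)  */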

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
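/* The back-pointers make the first block of an object reachable from any
   of its blocks: a 3-block object starting at block B ends up with
   busy.info.size values { 3, -1, -2 } at B, B+1, B+2, so block B+k can
   find B by adding its own (negative) size.  */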

#ifdef USE_PTHREAD
static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT;
pthread_mutex_t _malloc_mutex;
#endif

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

#ifdef USE_PTHREAD
  {
    pthread_mutexattr_t attr;

    pthread_mutexattr_init (&attr);
    pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init (&_malloc_mutex, &attr);
    pthread_mutexattr_destroy (&attr);
  }
#endif

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.  */
int
__malloc_initialize ()
{
#ifdef USE_PTHREAD
  pthread_once (&malloc_init_once_control, malloc_initialize_1);
#else
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();
#endif

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary. */
static __ptr_t morecore PP ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if	0
  if (size == 0)
    return NULL;
#endif

  LOCK ();
  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size. */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
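      /* E.g. a 100-byte request: SIZE becomes 99 and halves to 49, 24,
	 12, 6, 3, 1, so LOG ends up 7 and the fragment size is
	 1 << 7 == 128 bytes.  */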

      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  UNLOCK ();
  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
		  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef	_MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if  (defined (MEMMOVE_MISSING) || \
      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef	__malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size)	__malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  LOCK ();
  PROTECT_MALLOC_STATE (0);

  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal (info_blocks
							  * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#ifdef GC_MALLOC_CHECK
	  _free_internal (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block. */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block. */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
  UNLOCK ();
}

/* Return memory to the heap.  */

FREE_RETURN_TYPE
free (ptr)
     __ptr_t ptr;
{
  if (__free_hook != NULL)
    (*__free_hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
		     Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef	_MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif



/* Cope with systems lacking `memmove'.  */
#if  (defined (MEMMOVE_MISSING) || \
      !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef	__malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  LOCK ();
  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place. */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list. */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving. */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal (ptr);
	  result = _malloc_internal (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
	        (void) _malloc_internal (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal (blocks * BLOCKSIZE);
		  _free_internal (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old. */
	  result = _malloc_internal (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  UNLOCK ();
  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
    (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef	_MALLOC_INTERNAL
#define	_MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result;

  /* Guard against overflow in the multiplication; a wrapped product
     would silently allocate too small a block.  */
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;

  result = malloc (nmemb * size);
  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.  */

#ifndef	_MALLOC_INTERNAL
#define	_MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef	__GNU_LIBRARY__
#define	__sbrk	sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
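/* sbrk semantics, for reference: __sbrk (0) returns the current break
   without moving it, and __sbrk (N) moves the break by N bytes and
   returns its old value, i.e. the start of the newly added core.  That
   is what lets align and get_contiguous_space above probe and extend
   the heap.  */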
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef	_MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why; they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
				__malloc_size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

1694  /* Figure out how much we will need to pad this particular block
1695     to achieve the required alignment.  */
1696  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l == NULL)
	    {
	      free (result);
	      return NULL;
	    }
	  l->next = _aligned_blocks;
	  _aligned_blocks = l;
	}
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
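
/* Illustrative usage (editorial addition): the pointer memalign hands
   back is a multiple of ALIGNMENT, and the _aligned_blocks list above
   is what lets `free' map it back to the exact pointer malloc
   produced.  The GMALLOC_EXAMPLES guard is hypothetical.  */
#ifdef GMALLOC_EXAMPLES
static void
example_memalign (void)
{
  __ptr_t p = memalign (64, 100);
  if (p != NULL)
    {
      if ((unsigned long int) p % 64 != 0)
	abort ();	/* Would indicate a broken memalign.  */
      /* `free' accepts the aligned pointer even though it may point
	 into the middle of the underlying block.  */
      free (p);
    }
}
#endif /* GMALLOC_EXAMPLES */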

#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef	ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define	 __getpagesize()	getpagesize()
#endif

#ifndef	_MALLOC_INTERNAL
#define	_MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
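
/* Illustrative usage (editorial addition): valloc is simply memalign
   with the system page size, for buffers that must start on a page
   boundary.  The GMALLOC_EXAMPLES guard is hypothetical.  */
#ifdef GMALLOC_EXAMPLES
static void
example_valloc (void)
{
  __ptr_t buf = valloc (1);
  if (buf != NULL)
    {
      if ((unsigned long int) buf % __getpagesize () != 0)
	abort ();	/* Would indicate a broken valloc.  */
      free (buf);
    }
}
#endif /* GMALLOC_EXAMPLES */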

#endif	/* Not ELIDE_VALLOC.  */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library; see the file COPYING.  If
not, write to the Free Software Foundation, Inc., 51 Franklin Street,
Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef	_MALLOC_INTERNAL
#define	_MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values.  (These declarations use the file's own PP macro
   rather than __P, which this file never defines.)  */
static void (*old_free_hook) PP ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) PP ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) PP ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) PP ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
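
/* Illustrative layout (editorial addition): every block handed out by
   mallochook below looks like this:

	+------------------+--------------------+-----------+
	| struct hdr       | SIZE user bytes    | MAGICBYTE |
	| size, MAGICWORD  | (MALLOCFLOOD-ed)   | (1 byte)  |
	+------------------+--------------------+-----------+
	^                  ^
	hdr                pointer returned to the user

   checkhdr validates both the leading magic word and the trailing
   MAGICBYTE, so it catches clobbered headers as well as writes past
   the end of the block.  */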

#if	defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood PP ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr PP ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
	status = MCHECK_TAIL;
      else
	status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook PP ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook PP ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook PP ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) PP ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (ptr)
     __ptr_t ptr;
{
  /* PTR is the user pointer, which sits just past the header, so step
     back over the header before checking it, as freehook does.  */
  return mcheck_used ? checkhdr ((struct hdr *) ptr - 1) : MCHECK_DISABLED;
}
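
/* Illustrative usage (editorial addition): mcheck must be installed
   before the first allocation; after that, every free and realloc is
   validated automatically, and mprobe can check a live block on
   demand.  The GMALLOC_EXAMPLES guard is hypothetical.  */
#ifdef GMALLOC_EXAMPLES
static void
example_mcheck (void)
{
  /* Passing NULL selects the default mabort handler above.  */
  if (mcheck (NULL) == 0)
    {
      __ptr_t p = malloc (32);
      if (p != NULL && mprobe (p) == MCHECK_OK)
	free (p);
    }
}
#endif /* GMALLOC_EXAMPLES */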

#endif /* GC_MCHECK */

/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */
