/* malloc.c - dynamic memory allocation for bash. */

/*  Copyright (C) 1985-2005 Free Software Foundation, Inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2, or (at your option)
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111 USA.

In other words, you are welcome to use, share and improve this program.
You are forbidden to forbid anyone else to use, share and improve
what you give them.   Help stamp out software-hoarding!  */

/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 *	U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 *	Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue a warning when memory is nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
 * realloc knows how to return the same block given, just changing its size,
 * if the power of 2 is correct.
 */

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * occupies the first eight bytes of the block (a union mhead), and the
 * returned pointer points just past it.
 */
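
/* A sketch of the block layout this implies (details follow below with
   union mhead and the end guard; sizes here assume the 8-byte header
   defined in this file):

	+------------------+-------------------------+-----------+
	| union mhead (8)  | user data (mh_nbytes)   | guard (4) |
	+------------------+-------------------------+-----------+
	^                  ^
	block start        pointer returned to the caller (p + 1)

   On the free list, the word at block + sizeof (char *) is reused as
   the chain pointer; see CHAIN below. */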

/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
   uncover callers that refer to freed memory, and to have malloc() write 0xdf
   into memory as it's allocated to avoid referring to previous contents. */

/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
   handled by configure. */

#if defined (HAVE_CONFIG_H)
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#if defined (SHELL)
#  include "bashtypes.h"
#  include "stdc.h"
#else
#  include <sys/types.h>
#endif

#if defined (HAVE_UNISTD_H)
#  include <unistd.h>
#endif

/* Determine which kind of system this is.  */
#include <signal.h>

#if defined (HAVE_STRING_H)
#  include <string.h>
#else
#  include <strings.h>
#endif

#include <stdio.h>

/* Define getpagesize () if the system does not.  */
#ifndef HAVE_GETPAGESIZE
#  include "getpagesize.h"
#endif

#include "imalloc.h"
#ifdef MALLOC_STATS
#  include "mstats.h"
#endif
#ifdef MALLOC_REGISTER
#  include "table.h"
#endif
#ifdef MALLOC_WATCH
#  include "watch.h"
#endif

/* System-specific omissions. */
#ifdef HPUX
#  define NO_VALLOC
#endif

#define NBUCKETS	30

#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block.  */

/* The block header holds a flag indicating whether the memory is
   allocated, an index into nextf[], a size field, and a sentinel value
   used to detect whether a caller wrote before the start of allocated
   memory.  When realloc()ing, we either copy mh_nbytes of data to a new
   block or simply update mh_nbytes if the block has enough room for the
   new size.  Range checking is always done. */
union mhead {
  bits64_t mh_align;						/* 8 */
  struct {
    char mi_alloc; 		/* ISALLOC or ISFREE */		/* 1 */
    char mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
  } minfo;
};
#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2

#define MOVERHEAD	sizeof(union mhead)
#define MALIGN_MASK	7	/* one less than desired alignment */

typedef union _malloc_guard {
  char s[4];
  u_bits32_t i;
} mguard_t;

/* Access free-list pointer of a block.
   It is stored at block + sizeof (char *).
   This is not a field in the minfo structure member of union mhead
   because we want sizeof (union mhead)
   to describe the overhead for when the block is in use,
   and we do not want the free-list pointer to count in that.  */

#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))
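
/* For illustration (an assumption about typical layouts, not something
   this file guarantees): on a 32-bit system sizeof (char *) is 4, so
   the chain pointer overlays mi_nbytes, which is only meaningful for
   allocated blocks anyway; on an LP64 system it lands at offset 8, the
   first word of what would be user data.  Either way it never disturbs
   mi_alloc and mi_index, which must stay valid while the block is on
   the free list. */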

/* To implement range checking, we write magic values in at the beginning
   and end of each allocated block, and make sure they are undisturbed
   whenever a free or a realloc occurs. */

/* Written in the 2 bytes before the block's real space (-4 bytes) */
#define MAGIC2 0x5555
#define MSLOP  4		/* 4 bytes extra for u_bits32_t size */

/* How many bytes are actually allocated for a request of size N --
   rounded up to nearest multiple of 8 after accounting for malloc
   overhead. */
#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
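
/* Worked example using the constants above: for a request of n = 100,
   ALLOCATED_BYTES(100) == (100 + 8 + 4 + 7) & ~7 == 112, which lands in
   bucket 4 (binsize 128); for n = 101 the result is 120, still bucket 4.
   The minimum, n = 0, yields 16 and bucket 1. */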

#define ASSERT(p) \
  do \
    { \
      if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, __STRING(p), file, line); \
    } \
  while (0)

/* Minimum and maximum bucket indices for block splitting (and to bound
   the search for a block to split). */
#define SPLIT_MIN	2	/* XXX - was 3 */
#define SPLIT_MID	11
#define SPLIT_MAX	14

/* Minimum and maximum bucket indices for block coalescing. */
#define COMBINE_MIN	2
#define COMBINE_MAX	(pagebucket - 1)	/* XXX */

#define LESSCORE_MIN	10
#define LESSCORE_FRC	13

#define STARTBUCK	1

/* Flags for the internal functions. */
#define MALLOC_WRAPPER	0x01	/* wrapper function */
#define MALLOC_INTERNAL	0x02	/* internal function calling another */
#define MALLOC_NOTRACE	0x04	/* don't trace this allocation or free */
#define MALLOC_NOREG	0x08	/* don't register this allocation or free */

/* Error codes passed to xbotch(). */
#define ERR_DUPFREE		0x01
#define ERR_UNALLOC		0x02
#define ERR_UNDERFLOW		0x04
#define ERR_ASSERT_FAILED	0x08

/* Evaluates to true if NB is appropriate for bucket NU.  NB is adjusted
   appropriately by the caller to account for malloc overhead.  This only
   checks that the recorded size is not too big for the bucket.  We
   can't check whether or not it's in between NU and NU-1 because we
   might have encountered a busy bucket when allocating and moved up to
   the next size. */
#define IN_BUCKET(nb, nu)	((nb) <= binsizes[(nu)])

/* Use this when we want to be sure that NB is in bucket NU. */
#define RIGHT_BUCKET(nb, nu) \
	(((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
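
/* Continuing the example at ALLOCATED_BYTES: with nbytes == 112,
   IN_BUCKET(112, 4) holds (112 <= 128), and so does RIGHT_BUCKET(112, 4)
   (112 > 64).  IN_BUCKET(112, 5) also holds -- free() must tolerate that
   because malloc() may have skipped to a larger bucket to avoid a busy
   one -- but RIGHT_BUCKET(112, 5) does not. */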

/* nextf[i] is free list of blocks of size 2**(i + 3)  */

static union mhead *nextf[NBUCKETS];

/* busy[i] is nonzero while an allocation or free in bucket i is in progress. */

static char busy[NBUCKETS];

static int pagesz;	/* system page size. */
static int pagebucket;	/* bucket for requests a page in size */
static int maxbuck;	/* highest bucket receiving allocation request. */

static char *memtop;	/* top of heap */

static unsigned long binsizes[NBUCKETS] = {
	8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
	8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
	1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
	67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
	2147483648UL, 4294967295UL
};

/* binsizes[x] == (1 << ((x) + 3)), except the last entry, which is
   2^32 - 1 rather than 2^32 so it still fits in a 32-bit unsigned long. */
#define binsize(x)	binsizes[(x)]

/* Declarations for internal functions */
static PTR_T internal_malloc __P((size_t, const char *, int, int));
static PTR_T internal_realloc __P((PTR_T, size_t, const char *, int, int));
static void internal_free __P((PTR_T, const char *, int, int));
static PTR_T internal_memalign __P((size_t, size_t, const char *, int, int));
#ifndef NO_CALLOC
static PTR_T internal_calloc __P((size_t, size_t, const char *, int, int));
static void internal_cfree __P((PTR_T, const char *, int, int));
#endif
#ifndef NO_VALLOC
static PTR_T internal_valloc __P((size_t, const char *, int, int));
#endif

#if defined (botch)
extern void botch ();
#else
static void botch __P((const char *, const char *, int));
#endif
static void xbotch __P((PTR_T, int, const char *, const char *, int));

#if !HAVE_DECL_SBRK
extern char *sbrk ();
#endif /* !HAVE_DECL_SBRK */

#ifdef SHELL
extern int interrupt_immediately;
extern int signal_is_trapped __P((int));
#endif

#ifdef MALLOC_STATS
struct _malstats _mstats;
#endif /* MALLOC_STATS */

/* Debugging variables available to applications. */
int malloc_flags = 0;	/* future use */
int malloc_trace = 0;	/* trace allocations and frees to stderr */
int malloc_register = 0;	/* when nonzero, register allocations and frees
				   (effective only with MALLOC_REGISTER) */

#ifdef MALLOC_TRACE
char _malloc_trace_buckets[NBUCKETS];

/* These should really go into a header file. */
extern void mtrace_alloc __P((const char *, PTR_T, size_t, const char *, int));
extern void mtrace_free __P((PTR_T, int, const char *, int));
#endif

#if !defined (botch)
static void
botch (s, file, line)
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
  (void)fflush (stderr);
  abort ();
}
#endif

/* print the file and line number that caused the assertion failure and
   call botch() to do whatever the application wants with the information */
static void
xbotch (mem, e, s, file, line)
     PTR_T mem;
     int e;
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
			file ? file : "unknown", line);
#ifdef MALLOC_REGISTER
  if (mem != NULL && malloc_register)
    mregister_describe_mem (mem, stderr);
#endif
  (void)fflush (stderr);
  botch(s, file, line);
}

/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU - 1] is
   assumed to not be busy; the caller (morecore()) checks for this.
   BUSY[NU] must be set to 1. */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  if (nextf[nbuck] == 0 || busy[nbuck])
    return;

  busy[nbuck] = 1;
  siz = binsize (nbuck);

  mp2 = mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  while (mp && mp != (union mhead *)((char *)mp1 + siz))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
    }

  if (mp == 0)
    {
      busy[nbuck] = 0;
      return;
    }

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)
    {
      busy[nbuck] = 0;
      xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
    }

#ifdef MALLOC_DEBUG
  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    {
      busy[nbuck] = 0;
      return;	/* not adjacent */
    }
#endif

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);
  else
    CHAIN (mp2) = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}
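
/* Illustrative walk-through of bcoalesce(), using the sizes from
   binsizes[]: to refill an empty nextf[4] (128-byte blocks), it scans
   nextf[3] (64-byte blocks) for a block mp1 whose immediate neighbor at
   (char *)mp1 + 64 is also on that free list.  If such a pair exists,
   both are unlinked from nextf[3] and pushed onto nextf[4] as a single
   128-byte free block. */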

/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()).  BUSY[NU] must be set to 1. */
static void
bsplit (nu)
     register int nu;
{
  register union mhead *mp;
  int nbuck, nblks, split_max;
  unsigned long siz;

  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  if (nu >= SPLIT_MID)
    {
      for (nbuck = split_max; nbuck > nu; nbuck--)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }
  else
    {
      for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }

  if (nbuck > split_max || nbuck <= nu)
    return;

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

  /* Remove the block from the chain of larger blocks. */
  busy[nbuck] = 1;
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Split the block and put it on the requested chain. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}
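
/* Example, using the sizes from binsizes[]: if nextf[2] (32-byte
   blocks) is empty and the first larger non-busy, non-empty bucket is
   nextf[5] (256 bytes), bsplit(2) unlinks one 256-byte block and carves
   it into 256/32 == 8 chunks, each marked ISFREE with index 2 and
   chained together to form the new nextf[2]. */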

/* Take the memory block MP and add it to a chain < NU.  NU is the right bucket,
   but is busy.  This avoids memory orphaning. */
static void
xsplit (mp, nu)
     union mhead *mp;
     int nu;
{
  union mhead *nh;
  int nbuck, nblks, split_max;
  unsigned long siz;

  nbuck = nu - 1;
  while (nbuck >= SPLIT_MIN && busy[nbuck])
    nbuck--;
  if (nbuck < SPLIT_MIN)
    return;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nu]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);			/* original block size */
  nblks = siz / binsize (nbuck);	/* should be 2 most of the time */

  /* And add it to nextf[nbuck] */
  siz = binsize (nbuck);		/* XXX - resetting here */
  nh = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nbuck;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  busy[nbuck] = 1;
  CHAIN (mp) = nextf[nbuck];
  nextf[nbuck] = nh;
  busy[nbuck] = 0;
}

static void
block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
#  if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
#  endif
#endif
}

static void
unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
#  if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
#  endif
#endif
}

/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
static void
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding  */
{
  long siz;

  siz = binsize (nu);
  /* Should check for errors here, I guess. */
  sbrk (-siz);
  memtop -= siz;

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif
}

/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1. */
static void
morecore (nu)
     register int nu;		/* size index to get more of  */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
  if (interrupt_immediately || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#endif
    {
      block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = binsize (nu);	/* size of desired block for nextf[nu] */

  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 8-byte alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    unblock_signals (&set, &oset);
}
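
/* A worked example of the sbrk sizing above, assuming pagesz == 4096:
   for nu == 5 (siz == 256), sbrk_amt is one page and nblks == 16; for
   nu == 13 (siz == 65536), siz is already a multiple of the page size,
   so sbrk_amt == siz and nblks == 1.  Since every binsize is a power of
   two, the `siz + pagesz - sbrk_amt' adjustment is purely defensive as
   long as the page size is itself a power of two. */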

static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}

#define PREPOP_BIN	2
#define PREPOP_SIZE	32

static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  pagesz = getpagesize ();
  if (pagesz < 1024)
    pagesz = 1024;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.nsbrk++;
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
	 popular bin (2 == 32 bytes) with it.  Add whatever we need to curbrk
	 to make things 32-byte aligned, compute how many 32-byte chunks we're
	 going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;

      if (nunits > 0)
	{
	  mp = (union mhead *)curbrk;

	  nextf[PREPOP_BIN] = mp;
	  while (1)
	    {
	      mp->mh_alloc = ISFREE;
	      mp->mh_index = PREPOP_BIN;
	      if (--nunits <= 0) break;
	      CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	      mp = (union mhead *)((char *)mp + PREPOP_SIZE);
	    }
	  CHAIN(mp) = 0;
	}
    }

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}
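
/* Worked example of the pre-population above, with hypothetical
   numbers: given pagesz == 4096 and an initial break of 0x8c50,
   sbrk_needed is 4096 - 0xc50 == 944.  After the 32-byte alignment
   fixup, 928 bytes remain, seeding nextf[PREPOP_BIN] with 29 chunks
   before the first real allocation.  The final loop then sets
   pagebucket to 9, since binsize(9) == 4096. */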

static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  long nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is more than half the page size, we can start at pagebucket. */
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;

  /* Silently reject too-large requests. */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution.  */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

  /* End guard */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

  return (PTR_T) (p + 1);
}
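
/* Sketch of the chunk internal_malloc() returns, for a 4-byte request
   (which lands in bucket 1; see ALLOCATED_BYTES): the 8-byte header
   holds { ISALLOC, 1, MAGIC2, 4 }, followed by the 4 user bytes, then
   the 4-byte end guard holding mg.i == 4 -- exactly filling the 16-byte
   block.  free() and realloc() reassemble mg from those trailing bytes
   and compare it with mh_nbytes to catch overruns. */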

static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register unsigned int nbytes;
  int ubytes;		/* caller-requested size */
  mguard_t mg;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER)
  if (malloc_trace || malloc_register)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */

  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);

  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);

#if 1
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
#else
  if (((char *)p + binsize(nunits) == memtop) && nunits >= LESSCORE_MIN)
#endif
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
	 there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
	{
	  lesscore (nunits);
	  /* keeps the tracing and registering code in one place */
	  goto free_return;
	}
    }

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
#endif

  ASSERT (nunits < NBUCKETS);

  if (busy[nunits] == 1)
    {
      xsplit (p, nunits);	/* split block and add to different chain */
      goto free_return;
    }

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc.  */
  busy[nunits] = 1;
  /* Put this block on the free list.  */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;		/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}

static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m, *z;
  mguard_t mg;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);

  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed.  */
  if (RIGHT_BUCKET(nbytes, nunits))
    {
#if 0
      m = (char *)mem + p->mh_nbytes;
#else
      /* Compensate for increment above. */
      m -= 4;
#endif
      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }

  if (n < tocopy)
    tocopy = n;

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}
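
/* Example of the two realloc paths above: a block obtained for n == 100
   sits in bucket 4 (ALLOCATED_BYTES(100) == 112).  Growing it to
   n == 110 gives nbytes == 128, which still satisfies
   RIGHT_BUCKET(128, 4), so only mh_nbytes and the end guard are
   rewritten in place.  Growing it to n == 125 gives nbytes == 144,
   which forces the malloc/FASTCOPY/free path into bucket 5. */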

static PTR_T
internal_memalign (alignment, size, file, line, flags)
     size_t alignment;
     size_t size;
     const char *file;
     int line, flags;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it.  */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment.  */
#if 0
  aligned = (char *) (((long) ptr + alignment - 1) & -alignment);
#else
  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));
#endif

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it.  */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;

  return aligned;
}
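
/* Alignment arithmetic example: (~alignment + 1) is the two's-complement
   negation of alignment, so for alignment == 16 the mask clears the low
   four bits and a ptr of 0x100008 rounds up to aligned == 0x100010 (the
   addresses are hypothetical).  The header fabricated just below
   `aligned' records mh_nbytes == 8, the distance back to ptr, which
   internal_free() subtracts to recover the block malloc really
   returned. */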

#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed.  */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_VALLOC */

#ifndef NO_CALLOC
static PTR_T
internal_calloc (n, s, file, line, flags)
     size_t n, s;
     const char *file;
     int line, flags;
{
  size_t total;
  PTR_T result;

  total = n * s;
  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
  if (result)
    memset (result, 0, total);
  return result;
}
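
/* Note: the n * s multiplication in internal_calloc above is unchecked,
   so a large enough request can wrap around size_t and allocate less
   than the caller expects.  A minimal guard that could precede the
   multiplication (an illustrative sketch, not part of this allocator):

	if (s != 0 && n > (size_t)-1 / s)
	  return ((PTR_T) NULL);

   (size_t)-1 is the maximum size_t value, so the test rejects any pair
   whose product would overflow. */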

static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_CALLOC */

#ifdef MALLOC_STATS
int
malloc_free_blocks (size)
     int size;
{
  int nfree;
  register union mhead *p;

  nfree = 0;
  for (p = nextf[size]; p; p = CHAIN (p))
    nfree++;

  return nfree;
}
#endif

#if defined (MALLOC_WRAPFUNCS)
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}

void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_memalign (alignment, size, file, line)
     size_t alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}

#ifndef NO_CALLOC
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}

void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
#endif

#ifndef NO_VALLOC
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */

/* Externally-available functions that call their internal counterparts. */

PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}

PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}

void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}

PTR_T
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}

#ifndef NO_VALLOC
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
#endif

#ifndef NO_CALLOC
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}

void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
#endif