1/*
2 * tclAlloc.c --
3 *
4 *	This is a very fast storage allocator.  It allocates blocks of a
5 *	small number of different sizes, and keeps free lists of each size.
6 *	Blocks that don't exactly fit are passed up to the next larger size.
7 *	Blocks over a certain size are directly allocated from the system.
8 *
9 * Copyright (c) 1983 Regents of the University of California.
10 * Copyright (c) 1996-1997 Sun Microsystems, Inc.
11 * Copyright (c) 1998-1999 by Scriptics Corporation.
12 *
13 * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
14 *
15 * See the file "license.terms" for information on usage and redistribution
16 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
17 *
18 * RCS: @(#) $Id: tclAlloc.c,v 1.16.2.3 2007/09/22 15:46:45 das Exp $
19 */
20
21/*
22 * Windows and Unix use an alternative allocator when building with threads
23 * that has significantly reduced lock contention.
24 */
25
26#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC) || defined(TCL_MEM_DEBUG)
27
28#include "tclInt.h"
29#include "tclPort.h"
30
31#if USE_TCLALLOC
32
33#ifdef TCL_DEBUG
34#   define DEBUG
35/* #define MSTATS */
36#   define RCHECK
37#endif
38
39/*
40 * We should really make use of AC_CHECK_TYPE(caddr_t)
41 * here, but it can wait until Tcl uses config.h properly.
42 */
43#if defined(MAC_TCL) || defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
44typedef unsigned long caddr_t;
45#endif
46
47/*
48 * Alignment for allocated memory.
49 */
50
51#if defined(__APPLE__)
52#define ALLOCALIGN	16
53#else
54#define ALLOCALIGN	8
55#endif
56
57/*
58 * The overhead on a block is at least 8 bytes.  When free, this space
59 * contains a pointer to the next free block, and the bottom two bits must
60 * be zero.  When in use, the first byte is set to MAGIC, and the second
61 * byte is the size index.  The remaining bytes are for alignment.
62 * If range checking is enabled then a second word holds the size of the
63 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
64 * The order of elements is critical: ov_magic must overlay the low order
65 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
66 */
67
68union overhead {
69    union overhead *ov_next;		/* when free */
70    unsigned char ov_padding[ALLOCALIGN];/* align struct to ALLOCALIGN bytes */
71    struct {
72	unsigned char	ovu_magic0;	/* magic number */
73	unsigned char	ovu_index;	/* bucket # */
74	unsigned char	ovu_unused;	/* unused */
75	unsigned char	ovu_magic1;	/* other magic number */
76#ifdef RCHECK
77	unsigned short	ovu_rmagic;	/* range magic number */
78	unsigned long	ovu_size;	/* actual block size */
79	unsigned short  ovu_unused2;    /* padding to 8-byte align */
80#endif
81    } ovu;
82#define ov_magic0	ovu.ovu_magic0
83#define ov_magic1	ovu.ovu_magic1
84#define ov_index	ovu.ovu_index
85#define ov_rmagic	ovu.ovu_rmagic
86#define ov_size		ovu.ovu_size
87};
88
89
90#define MAGIC		0xef		/* magic # on accounting info */
91#define RMAGIC		0x5555		/* magic # on range info */
92
93#ifdef RCHECK
94#define	RSLOP		sizeof (unsigned short)
95#else
96#define	RSLOP		0
97#endif
98
99#define OVERHEAD (sizeof(union overhead) + RSLOP)
100
101/*
102 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
103 * smallest allocatable block is MINBLOCK bytes. The overhead information
104 * precedes the data area returned to the user.
105 */
106
107#define MINBLOCK	((sizeof(union overhead) + (ALLOCALIGN-1)) & ~(ALLOCALIGN-1))
108#define NBUCKETS	(13 - (MINBLOCK >> 4))
109#define MAXMALLOC	(1<<(NBUCKETS+2))
110static	union overhead *nextf[NBUCKETS];
111
112/*
113 * The following structure is used to keep track of all system memory
114 * currently owned by Tcl.  When finalizing, all this memory will
115 * be returned to the system.
116 */
117
118struct block {
119    struct block *nextPtr;	/* Linked list. */
120    struct block *prevPtr;	/* Linked list for big blocks, ensures 8-byte
121				 * alignment for suballocated blocks. */
122};
123
124static struct block *blockList;		/* Tracks the suballocated blocks. */
125static struct block bigBlocks = {	/* Big blocks aren't suballocated. */
126    &bigBlocks, &bigBlocks
127};
128
/*
 * The allocator is protected by a special mutex that must be
 * explicitly initialized.  Furthermore, because Tcl_Alloc may be
 * used before anything else in Tcl, this module is self-initializing,
 * guarded by the allocInit variable.
 */
135
136#ifdef TCL_THREADS
137static Tcl_Mutex *allocMutexPtr;
138#endif
139static int allocInit = 0;
140
141
142#ifdef MSTATS
143
144/*
145 * nmalloc[i] is the difference between the number of mallocs and frees
146 * for a given block size.
147 */
148
149static	unsigned int nmalloc[NBUCKETS+1];
150#include <stdio.h>
151#endif
152
153#if defined(DEBUG) || defined(RCHECK)
154#define	ASSERT(p)   if (!(p)) panic(# p)
155#define RANGE_ASSERT(p) if (!(p)) panic(# p)
156#else
157#define	ASSERT(p)
158#define RANGE_ASSERT(p)
159#endif
160
161/*
162 * Prototypes for functions used only in this file.
163 */
164
165static void 		MoreCore _ANSI_ARGS_((int bucket));
166
167
168/*
169 *-------------------------------------------------------------------------
170 *
171 * TclInitAlloc --
172 *
173 *	Initialize the memory system.
174 *
175 * Results:
176 *	None.
177 *
178 * Side effects:
179 *	Initialize the mutex used to serialize allocations.
180 *
181 *-------------------------------------------------------------------------
182 */
183
184void
185TclInitAlloc()
186{
187    if (!allocInit) {
188	allocInit = 1;
189#ifdef TCL_THREADS
190	allocMutexPtr = Tcl_GetAllocMutex();
191#endif
192    }
193}
194
195/*
196 *-------------------------------------------------------------------------
197 *
198 * TclFinalizeAllocSubsystem --
199 *
200 *	Release all resources being used by this subsystem, including
201 *	aggressively freeing all memory allocated by TclpAlloc() that
202 *	has not yet been released with TclpFree().
203 *
204 *	After this function is called, all memory allocated with
205 *	TclpAlloc() should be considered unusable.
206 *
207 * Results:
208 *	None.
209 *
210 * Side effects:
211 *	This subsystem is self-initializing, since memory can be
212 *	allocated before Tcl is formally initialized.  After this call,
213 *	this subsystem has been reset to its initial state and is
214 *	usable again.
215 *
216 *-------------------------------------------------------------------------
217 */
218
219void
220TclFinalizeAllocSubsystem()
221{
222    unsigned int i;
223    struct block *blockPtr, *nextPtr;
224
225    Tcl_MutexLock(allocMutexPtr);
226    for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) {
227	nextPtr = blockPtr->nextPtr;
228	TclpSysFree(blockPtr);
229    }
230    blockList = NULL;
231
232    for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) {
233	nextPtr = blockPtr->nextPtr;
234	TclpSysFree(blockPtr);
235	blockPtr = nextPtr;
236    }
237    bigBlocks.nextPtr = &bigBlocks;
238    bigBlocks.prevPtr = &bigBlocks;
239
240    for (i = 0; i < NBUCKETS; i++) {
241	nextf[i] = NULL;
242#ifdef MSTATS
243	nmalloc[i] = 0;
244#endif
245    }
246#ifdef MSTATS
247    nmalloc[i] = 0;
248#endif
249    Tcl_MutexUnlock(allocMutexPtr);
250}
251
252/*
253 *----------------------------------------------------------------------
254 *
255 * TclpAlloc --
256 *
257 *	Allocate more memory.
258 *
259 * Results:
260 *	None.
261 *
262 * Side effects:
263 *	None.
264 *
265 *----------------------------------------------------------------------
266 */
267
char *
TclpAlloc(nbytes)
    unsigned int nbytes;	/* Number of bytes to allocate. */
{
    register union overhead *op;	/* Header placed in front of the data. */
    register long bucket;		/* Free-list index for small blocks. */
    register unsigned amt;		/* Total block size for that bucket. */
    struct block *bigBlockPtr = NULL;

    if (!allocInit) {
	/*
	 * We have to make the "self initializing" because Tcl_Alloc
	 * may be used before any other part of Tcl.  E.g., see
	 * main() for tclsh!
	 */
	TclInitAlloc();
    }
    Tcl_MutexLock(allocMutexPtr);
    /*
     * First the simple case: we simply allocate big blocks directly.
     * The inner test guards against unsigned overflow when the
     * bookkeeping space is added to nbytes.
     */
    if (nbytes >= MAXMALLOC - OVERHEAD) {
	if (nbytes <= UINT_MAX - OVERHEAD - sizeof(struct block)) {
	    bigBlockPtr = (struct block *) TclpSysAlloc((unsigned)
		    (sizeof(struct block) + OVERHEAD + nbytes), 0);
	}
	if (bigBlockPtr == NULL) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return NULL;
	}

	/*
	 * Insert at the head of the doubly-linked bigBlocks list so
	 * TclpFree and the finalizer can unlink/release it later.
	 */

	bigBlockPtr->nextPtr = bigBlocks.nextPtr;
	bigBlocks.nextPtr = bigBlockPtr;
	bigBlockPtr->prevPtr = &bigBlocks;
	bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;

	op = (union overhead *) (bigBlockPtr + 1);
	op->ov_magic0 = op->ov_magic1 = MAGIC;
	op->ov_index = 0xff;		/* 0xff marks "not in a bucket". */
#ifdef MSTATS
	nmalloc[NBUCKETS]++;
#endif
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	op->ov_rmagic = RMAGIC;
	*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
	Tcl_MutexUnlock(allocMutexPtr);
	return (void *)(op+1);
    }
    /*
     * Convert amount of memory requested into closest block size
     * stored in hash buckets which satisfies request.
     * Account for space used per block for accounting.
     */

    amt = MINBLOCK;		/* size of first bucket */
    bucket = MINBLOCK >> 4;

    while (nbytes + OVERHEAD > amt) {
	amt <<= 1;
	if (amt == 0) {
	    /* amt wrapped to zero: request too large for any bucket. */
	    Tcl_MutexUnlock(allocMutexPtr);
	    return (NULL);
	}
	bucket++;
    }
    ASSERT( bucket < NBUCKETS );

    /*
     * If nothing in hash bucket right now,
     * request more memory from the system.
     */
    if ((op = nextf[bucket]) == NULL) {
	MoreCore(bucket);
	if ((op = nextf[bucket]) == NULL) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return (NULL);
	}
    }
    /*
     * Remove from linked list.  Writing the magic bytes below
     * deliberately overwrites ov_next, which is only meaningful while
     * the block sits on a free list (they share the union).
     */
    nextf[bucket] = op->ov_next;
    op->ov_magic0 = op->ov_magic1 = MAGIC;
    op->ov_index = (unsigned char) bucket;
#ifdef MSTATS
    nmalloc[bucket]++;
#endif
#ifdef RCHECK
    /*
     * Record allocated size of block and
     * bound space with magic numbers.
     */
    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
    op->ov_rmagic = RMAGIC;
    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
    return ((char *)(op + 1));
}
372
373/*
374 *----------------------------------------------------------------------
375 *
376 * MoreCore --
377 *
378 *	Allocate more memory to the indicated bucket.
379 *
380 *	Assumes Mutex is already held.
381 *
382 * Results:
383 *	None.
384 *
385 * Side effects:
386 *	Attempts to get more memory from the system.
387 *
388 *----------------------------------------------------------------------
389 */
390
391static void
392MoreCore(bucket)
393    int bucket;		/* What bucket to allocat to. */
394{
395    register union overhead *op;
396    register long sz;		/* size of desired block */
397    long amt;			/* amount to allocate */
398    int nblks;			/* how many blocks we get */
399    struct block *blockPtr;
400
401    /*
402     * sbrk_size <= 0 only for big, FLUFFY, requests (about
403     * 2^30 bytes on a VAX, I think) or for a negative arg.
404     */
405    sz = 1 << (bucket + 3);
406    ASSERT(sz > 0);
407
408    amt = MAXMALLOC;
409    nblks = amt / sz;
410    ASSERT(nblks*sz == amt);
411
412    blockPtr = (struct block *) TclpSysAlloc((unsigned)
413	    (sizeof(struct block) + amt), 1);
414    /* no more room! */
415    if (blockPtr == NULL) {
416	return;
417    }
418    blockPtr->nextPtr = blockList;
419    blockList = blockPtr;
420
421    op = (union overhead *) (blockPtr + 1);
422
423    /*
424     * Add new memory allocated to that on
425     * free list for this hash bucket.
426     */
427    nextf[bucket] = op;
428    while (--nblks > 0) {
429	op->ov_next = (union overhead *)((caddr_t)op + sz);
430	op = (union overhead *)((caddr_t)op + sz);
431    }
432    op->ov_next = (union overhead *)NULL;
433}
434
435/*
436 *----------------------------------------------------------------------
437 *
438 * TclpFree --
439 *
440 *	Free memory.
441 *
442 * Results:
443 *	None.
444 *
445 * Side effects:
446 *	None.
447 *
448 *----------------------------------------------------------------------
449 */
450
void
TclpFree(cp)
    char *cp;		/* Pointer to memory to free. */
{
    register long size;			/* Bucket index from ov_index. */
    register union overhead *op;	/* Header in front of cp. */
    struct block *bigBlockPtr;

    /*
     * As with free(), freeing NULL is a no-op.
     */

    if (cp == NULL) {
	return;
    }

    Tcl_MutexLock(allocMutexPtr);

    /*
     * Step back over the overhead header that TclpAlloc put in front
     * of the caller's data.
     */

    op = (union overhead *)((caddr_t)cp - sizeof (union overhead));

    ASSERT(op->ov_magic0 == MAGIC);		/* make sure it was in use */
    ASSERT(op->ov_magic1 == MAGIC);
    if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) {
	/*
	 * Bad magic: the pointer is foreign, already freed, or the
	 * header was overwritten.  Non-debug builds silently ignore it
	 * instead of corrupting the free lists.
	 */
	Tcl_MutexUnlock(allocMutexPtr);
	return;
    }

    RANGE_ASSERT(op->ov_rmagic == RMAGIC);
    RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
    size = op->ov_index;
    if ( size == 0xff ) {
	/*
	 * Index 0xff marks a big block allocated straight from the
	 * system: unlink it from the doubly-linked bigBlocks list and
	 * release it.
	 */
#ifdef MSTATS
	nmalloc[NBUCKETS]--;
#endif
	bigBlockPtr = (struct block *) op - 1;
	bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
	bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
	TclpSysFree(bigBlockPtr);
	Tcl_MutexUnlock(allocMutexPtr);
	return;
    }
    ASSERT(size < NBUCKETS);
    op->ov_next = nextf[size];	/* also clobbers ov_magic */
    nextf[size] = op;
#ifdef MSTATS
    nmalloc[size]--;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
}
495
496/*
497 *----------------------------------------------------------------------
498 *
499 * TclpRealloc --
500 *
501 *	Reallocate memory.
502 *
503 * Results:
504 *	None.
505 *
506 * Side effects:
507 *	None.
508 *
509 *----------------------------------------------------------------------
510 */
511
512char *
513TclpRealloc(cp, nbytes)
514    char *cp;			/* Pointer to alloced block. */
515    unsigned int nbytes;	/* New size of memory. */
516{
517    int i;
518    union overhead *op;
519    struct block *bigBlockPtr;
520    int expensive;
521    unsigned long maxsize;
522
523    if (cp == NULL) {
524	return (TclpAlloc(nbytes));
525    }
526
527    Tcl_MutexLock(allocMutexPtr);
528
529    op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
530
531    ASSERT(op->ov_magic0 == MAGIC);		/* make sure it was in use */
532    ASSERT(op->ov_magic1 == MAGIC);
533    if (op->ov_magic0 != MAGIC || op->ov_magic1 != MAGIC) {
534	Tcl_MutexUnlock(allocMutexPtr);
535	return NULL;
536    }
537
538    RANGE_ASSERT(op->ov_rmagic == RMAGIC);
539    RANGE_ASSERT(*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
540    i = op->ov_index;
541
542    /*
543     * If the block isn't in a bin, just realloc it.
544     */
545
546    if (i == 0xff) {
547	struct block *prevPtr, *nextPtr;
548	bigBlockPtr = (struct block *) op - 1;
549	prevPtr = bigBlockPtr->prevPtr;
550	nextPtr = bigBlockPtr->nextPtr;
551	bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr,
552		sizeof(struct block) + OVERHEAD + nbytes);
553	if (bigBlockPtr == NULL) {
554	    Tcl_MutexUnlock(allocMutexPtr);
555	    return NULL;
556	}
557
558	if (prevPtr->nextPtr != bigBlockPtr) {
559	    /*
560	     * If the block has moved, splice the new block into the list where
561	     * the old block used to be.
562	     */
563
564	    prevPtr->nextPtr = bigBlockPtr;
565	    nextPtr->prevPtr = bigBlockPtr;
566	}
567
568	op = (union overhead *) (bigBlockPtr + 1);
569#ifdef MSTATS
570	nmalloc[NBUCKETS]++;
571#endif
572#ifdef RCHECK
573	/*
574	 * Record allocated size of block and update magic number bounds.
575	 */
576
577	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
578	*(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
579#endif
580	Tcl_MutexUnlock(allocMutexPtr);
581	return (char *)(op+1);
582    }
583    maxsize = 1 << (i+3);
584    expensive = 0;
585    if ( nbytes + OVERHEAD > maxsize ) {
586	expensive = 1;
587    } else if ( i > 0 && nbytes + OVERHEAD < (maxsize/2) ) {
588	expensive = 1;
589    }
590
591    if (expensive) {
592	void *newp;
593
594	Tcl_MutexUnlock(allocMutexPtr);
595
596	newp = TclpAlloc(nbytes);
597	if ( newp == NULL ) {
598	    return NULL;
599	}
600	maxsize -= OVERHEAD;
601	if ( maxsize < nbytes )
602	    nbytes = maxsize;
603	memcpy((VOID *) newp, (VOID *) cp, (size_t) nbytes);
604	TclpFree(cp);
605	return newp;
606    }
607
608    /*
609     * Ok, we don't have to copy, it fits as-is
610     */
611#ifdef RCHECK
612    op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
613    *(unsigned short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
614#endif
615    Tcl_MutexUnlock(allocMutexPtr);
616    return(cp);
617}
618
619/*
620 *----------------------------------------------------------------------
621 *
622 * mstats --
623 *
624 *	Prints two lines of numbers, one showing the length of the
625 *	free list for each size category, the second showing the
626 *	number of mallocs - frees for each size category.
627 *
628 * Results:
629 *	None.
630 *
631 * Side effects:
632 *	None.
633 *
634 *----------------------------------------------------------------------
635 */
636
#ifdef MSTATS
void
mstats(s)
    char *s;	/* Where to write info. */
{
    register int i, j;
    register union overhead *p;
    int totfree = 0,		/* Total bytes sitting on free lists. */
	totused = 0;		/* Total bytes in outstanding small blocks. */

    Tcl_MutexLock(allocMutexPtr);
    fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s);
    for (i = 0; i < NBUCKETS; i++) {
	/*
	 * Count the blocks on this bucket's free list, then print one
	 * number per bucket (printing inside the loop would emit a
	 * running index per block and nothing for empty buckets,
	 * misaligning the columns against the "used" row below).
	 */
	for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) {
	    /* Empty body: just counting. */
	}
	fprintf(stderr, " %d", j);
	totfree += j * (1 << (i + 3));
    }
    fprintf(stderr, "\nused:\t");
    for (i = 0; i < NBUCKETS; i++) {
	fprintf(stderr, " %u", nmalloc[i]);
	totused += nmalloc[i] * (1 << (i + 3));
    }
    fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n",
	    totused, totfree);
    fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %u\n",
	    MAXMALLOC, nmalloc[NBUCKETS]);
    Tcl_MutexUnlock(allocMutexPtr);
}
#endif
666
667#else  /* !USE_TCLALLOC */
668
669/*
670 *----------------------------------------------------------------------
671 *
672 * TclpAlloc --
673 *
674 *	Allocate more memory.
675 *
676 * Results:
677 *	None.
678 *
679 * Side effects:
680 *	None.
681 *
682 *----------------------------------------------------------------------
683 */
684
char *
TclpAlloc(nbytes)
    unsigned int nbytes;	/* Number of bytes to allocate. */
{
    /*
     * With the bucket allocator disabled, defer straight to the
     * system malloc.
     */

    char *result = (char *) malloc(nbytes);

    return result;
}
691
692/*
693 *----------------------------------------------------------------------
694 *
695 * TclpFree --
696 *
697 *	Free memory.
698 *
699 * Results:
700 *	None.
701 *
702 * Side effects:
703 *	None.
704 *
705 *----------------------------------------------------------------------
706 */
707
void
TclpFree(cp)
    char *cp;		/* Pointer to memory to free. */
{
    /*
     * free() already accepts NULL, so no guard is needed here.
     */

    free(cp);
}
715
716/*
717 *----------------------------------------------------------------------
718 *
719 * TclpRealloc --
720 *
721 *	Reallocate memory.
722 *
723 * Results:
724 *	None.
725 *
726 * Side effects:
727 *	None.
728 *
729 *----------------------------------------------------------------------
730 */
731
char *
TclpRealloc(cp, nbytes)
    char *cp;			/* Pointer to alloced block. */
    unsigned int nbytes;	/* New size of memory. */
{
    /*
     * With the bucket allocator disabled, defer straight to the
     * system realloc.
     */

    void *grown = realloc(cp, nbytes);

    return (char *) grown;
}
739
740#endif /* !USE_TCLALLOC */
741#endif /* !TCL_THREADS */
742