/*
 * tclAlloc.c --
 *
 *	This is a very fast storage allocator. It allocates blocks of a small
 *	number of different sizes, and keeps free lists of each size. Blocks
 *	that don't exactly fit are passed up to the next larger size. Blocks
 *	over a certain size are directly allocated from the system.
 *
 * Copyright (c) 1983 Regents of the University of California.
 * Copyright (c) 1996-1997 Sun Microsystems, Inc.
 * Copyright (c) 1998-1999 by Scriptics Corporation.
 *
 * Portions contributed by Chris Kingsley, Jack Jansen and Ray Johnson.
 *
 * See the file "license.terms" for information on usage and redistribution of
 * this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclAlloc.c,v 1.27.2.1 2009/09/29 04:43:58 dgp Exp $
 */

/*
 * Windows and Unix use an alternative allocator when building with threads
 * that has significantly reduced lock contention.
 */

#include "tclInt.h"
#if !defined(TCL_THREADS) || !defined(USE_THREAD_ALLOC)

#if USE_TCLALLOC

#ifdef TCL_DEBUG
#   define DEBUG
/* #define MSTATS */
#   define RCHECK
#endif

/*
 * We should really make use of AC_CHECK_TYPE(caddr_t) here, but it can wait
 * until Tcl uses config.h properly.
 */

#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__BORLANDC__)
typedef unsigned long caddr_t;
#endif

/*
 * The overhead on a block is at least 8 bytes. When free, this space contains
 * a pointer to the next free block, and the bottom two bits must be zero.
 * When in use, the first byte is set to MAGIC, and the second byte is the
 * size index. The remaining bytes are for alignment. If range checking is
 * enabled then a second word holds the size of the requested block, less 1,
 * rounded up to a multiple of sizeof(RMAGIC). The order of elements is
 * critical: ov.magic must overlay the low order bits of ov.next, and ov.magic
 * can not be a valid ov.next bit pattern.
 */

union overhead {
    union overhead *next;		/* when free */
    unsigned char padding[TCL_ALLOCALIGN];	/* align struct to TCL_ALLOCALIGN bytes */
    struct {
	unsigned char magic0;		/* magic number */
	unsigned char index;		/* bucket # */
	unsigned char unused;		/* unused */
	unsigned char magic1;		/* other magic number */
#ifdef RCHECK
	unsigned short rmagic;		/* range magic number */
	unsigned long size;		/* actual block size */
	unsigned short unused2;		/* padding to 8-byte align */
#endif
    } ovu;
#define overMagic0	ovu.magic0
#define overMagic1	ovu.magic1
#define bucketIndex	ovu.index
#define rangeCheckMagic	ovu.rmagic
#define realBlockSize	ovu.size
};
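
/*
 * Illustrative layout (assumed sizes, for explanation only): on a platform
 * where TCL_ALLOCALIGN is 8 and RCHECK is disabled, the header occupies 8
 * bytes. In use it reads
 *
 *	byte 0: overMagic0 (0xef)	byte 1: bucketIndex
 *	byte 2: unused			byte 3: overMagic1 (0xef)
 *	bytes 4-7: alignment padding
 *
 * while a free header reuses the same storage as the "next" pointer, which
 * is why the magic byte must overlay next's low-order bits and must not be a
 * value next could legitimately hold. The exact sizes depend on the
 * platform's TCL_ALLOCALIGN and on whether RCHECK is defined.
 */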


#define MAGIC		0xef	/* magic # on accounting info */
#define RMAGIC		0x5555	/* magic # on range info */

#ifdef RCHECK
#define	RSLOP		sizeof (unsigned short)
#else
#define	RSLOP		0
#endif

#define OVERHEAD (sizeof(union overhead) + RSLOP)

/*
 * Macro to make it easier to refer to the end-of-block guard magic.
 */

#define BLOCK_END(overPtr) \
    (*(unsigned short *)((caddr_t)((overPtr) + 1) + (overPtr)->realBlockSize))
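
/*
 * BLOCK_END addresses the unsigned short that sits immediately after the
 * caller-visible data: (overPtr + 1) is the start of the user area and
 * realBlockSize is the requested size rounded up to a multiple of
 * sizeof(RMAGIC), so the guard word lives just past the usable bytes.
 * TclpAlloc writes RMAGIC there, and TclpFree/TclpRealloc assert that it is
 * still intact, which catches writes that run off the end of a block.
 */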

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3). The
 * smallest allocatable block is MINBLOCK bytes. The overhead information
 * precedes the data area returned to the user.
 */

#define MINBLOCK	((sizeof(union overhead) + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
#define NBUCKETS	(13 - (MINBLOCK >> 4))
#define MAXMALLOC	(1<<(NBUCKETS+2))
static union overhead *nextf[NBUCKETS];
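
/*
 * Worked example (assumed values, for illustration only): if TCL_ALLOCALIGN
 * is 16 and the overhead union is 16 bytes, then MINBLOCK is 16, NBUCKETS is
 * 13 - (16 >> 4) = 12, and MAXMALLOC is 1 << 14 = 16384. Free lists then
 * cover block sizes 16, 32, ..., up to MAXMALLOC, and any request that does
 * not fit under MAXMALLOC - OVERHEAD bypasses the buckets entirely.
 */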

/*
 * The following structure is used to keep track of all system memory
 * currently owned by Tcl. When finalizing, all this memory will be returned
 * to the system.
 */

struct block {
    struct block *nextPtr;	/* Linked list. */
    struct block *prevPtr;	/* Linked list for big blocks, ensures 8-byte
				 * alignment for suballocated blocks. */
};

static struct block *blockList;	/* Tracks the suballocated blocks. */
static struct block bigBlocks = {	/* Big blocks aren't suballocated. */
    &bigBlocks, &bigBlocks
};
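
/*
 * Two separate lists are kept: blockList is a singly-linked list of the
 * MAXMALLOC-sized chunks that MoreCore() carves into bucketed blocks, and
 * bigBlocks is the sentinel of a circular doubly-linked list of oversized
 * allocations. Big blocks need both links because they are unlinked
 * individually in TclpFree() and TclpRealloc(); suballocated chunks are only
 * ever released all at once in TclFinalizeAllocSubsystem().
 */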

/*
 * The allocator is protected by a special mutex that must be explicitly
 * initialized. Furthermore, because Tcl_Alloc may be used before anything
 * else in Tcl, this module self-initializes when needed, guarded by the
 * allocInit variable.
 */

#ifdef TCL_THREADS
static Tcl_Mutex *allocMutexPtr;
#endif
static int allocInit = 0;

#ifdef MSTATS

/*
 * numMallocs[i] is the difference between the number of mallocs and frees for
 * a given block size.
 */

static	unsigned int numMallocs[NBUCKETS+1];
#include <stdio.h>
#endif

#if defined(DEBUG) || defined(RCHECK)
#define	ASSERT(p)	if (!(p)) Tcl_Panic(# p)
#define RANGE_ASSERT(p) if (!(p)) Tcl_Panic(# p)
#else
#define	ASSERT(p)
#define RANGE_ASSERT(p)
#endif

/*
 * Prototypes for functions used only in this file.
 */

static void		MoreCore(int bucket);

/*
 *-------------------------------------------------------------------------
 *
 * TclInitAlloc --
 *
 *	Initialize the memory system.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Initialize the mutex used to serialize allocations.
 *
 *-------------------------------------------------------------------------
 */

void
TclInitAlloc(void)
{
    if (!allocInit) {
	allocInit = 1;
#ifdef TCL_THREADS
	allocMutexPtr = Tcl_GetAllocMutex();
#endif
    }
}

/*
 *-------------------------------------------------------------------------
 *
 * TclFinalizeAllocSubsystem --
 *
 *	Release all resources being used by this subsystem, including
 *	aggressively freeing all memory allocated by TclpAlloc() that has not
 *	yet been released with TclpFree().
 *
 *	After this function is called, all memory allocated with TclpAlloc()
 *	should be considered unusable.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	This subsystem is self-initializing, since memory can be allocated
 *	before Tcl is formally initialized. After this call, this subsystem
 *	has been reset to its initial state and is usable again.
 *
 *-------------------------------------------------------------------------
 */

void
TclFinalizeAllocSubsystem(void)
{
    unsigned int i;
    struct block *blockPtr, *nextPtr;

    Tcl_MutexLock(allocMutexPtr);
    for (blockPtr = blockList; blockPtr != NULL; blockPtr = nextPtr) {
	nextPtr = blockPtr->nextPtr;
	TclpSysFree(blockPtr);
    }
    blockList = NULL;

    for (blockPtr = bigBlocks.nextPtr; blockPtr != &bigBlocks; ) {
	nextPtr = blockPtr->nextPtr;
	TclpSysFree(blockPtr);
	blockPtr = nextPtr;
    }
    bigBlocks.nextPtr = &bigBlocks;
    bigBlocks.prevPtr = &bigBlocks;

    for (i=0 ; i<NBUCKETS ; i++) {
	nextf[i] = NULL;
#ifdef MSTATS
	numMallocs[i] = 0;
#endif
    }
#ifdef MSTATS
    numMallocs[i] = 0;
#endif
    Tcl_MutexUnlock(allocMutexPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *	Allocate more memory.
 *
 * Results:
 *	Returns a pointer to the allocated block, or NULL if the allocation
 *	fails.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(
    unsigned int numBytes)	/* Number of bytes to allocate. */
{
    register union overhead *overPtr;
    register long bucket;
    register unsigned amount;
    struct block *bigBlockPtr = NULL;

    if (!allocInit) {
	/*
	 * We have to make this module self-initializing because Tcl_Alloc
	 * may be used before any other part of Tcl. E.g., see main() for
	 * tclsh!
	 */

	TclInitAlloc();
    }
    Tcl_MutexLock(allocMutexPtr);

    /*
     * First the simple case: we simply allocate big blocks directly.
     */
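
    /*
     * Layout of a big block, as set up below:
     *
     *	[struct block][union overhead][user data ...][RMAGIC guard]
     *
     * The struct block links the allocation onto the bigBlocks list, the
     * overhead header is marked with bucketIndex 0xff so TclpFree() knows it
     * was not suballocated, and the pointer returned to the caller is the
     * address just past the overhead header. The guard word is only present
     * when RCHECK is defined.
     */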

    if (numBytes >= MAXMALLOC - OVERHEAD) {
	if (numBytes <= UINT_MAX - OVERHEAD - sizeof(struct block)) {
	    bigBlockPtr = (struct block *) TclpSysAlloc((unsigned)
		    (sizeof(struct block) + OVERHEAD + numBytes), 0);
	}
	if (bigBlockPtr == NULL) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return NULL;
	}
	bigBlockPtr->nextPtr = bigBlocks.nextPtr;
	bigBlocks.nextPtr = bigBlockPtr;
	bigBlockPtr->prevPtr = &bigBlocks;
	bigBlockPtr->nextPtr->prevPtr = bigBlockPtr;

	overPtr = (union overhead *) (bigBlockPtr + 1);
	overPtr->overMagic0 = overPtr->overMagic1 = MAGIC;
	overPtr->bucketIndex = 0xff;
#ifdef MSTATS
	numMallocs[NBUCKETS]++;
#endif

#ifdef RCHECK
	/*
	 * Record allocated size of block and bound space with magic numbers.
	 */

	overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
	overPtr->rangeCheckMagic = RMAGIC;
	BLOCK_END(overPtr) = RMAGIC;
#endif

	Tcl_MutexUnlock(allocMutexPtr);
	return (void *)(overPtr+1);
    }

    /*
     * Convert amount of memory requested into closest block size stored in
     * hash buckets which satisfies request. Account for space used per block
     * for accounting.
     */
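
    /*
     * For example (assuming a 16-byte OVERHEAD and a 16-byte MINBLOCK, which
     * depend on the platform and on RCHECK), a request for 100 bytes needs
     * 116 bytes including overhead, so the loop below doubles amount
     * 16 -> 32 -> 64 -> 128 and settles on the 128-byte bucket; the
     * caller-usable portion of that block is 128 - OVERHEAD bytes.
     */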

    amount = MINBLOCK;		/* size of first bucket */
    bucket = MINBLOCK >> 4;

    while (numBytes + OVERHEAD > amount) {
	amount <<= 1;
	if (amount == 0) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return NULL;
	}
	bucket++;
    }
    ASSERT(bucket < NBUCKETS);

    /*
     * If nothing in hash bucket right now, request more memory from the
     * system.
     */

    if ((overPtr = nextf[bucket]) == NULL) {
	MoreCore(bucket);
	if ((overPtr = nextf[bucket]) == NULL) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return NULL;
	}
    }

    /*
     * Remove from linked list.
     */

    nextf[bucket] = overPtr->next;
    overPtr->overMagic0 = overPtr->overMagic1 = MAGIC;
    overPtr->bucketIndex = (unsigned char) bucket;

#ifdef MSTATS
    numMallocs[bucket]++;
#endif

#ifdef RCHECK
    /*
     * Record allocated size of block and bound space with magic numbers.
     */

    overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
    overPtr->rangeCheckMagic = RMAGIC;
    BLOCK_END(overPtr) = RMAGIC;
#endif

    Tcl_MutexUnlock(allocMutexPtr);
    return ((char *)(overPtr + 1));
}

/*
 *----------------------------------------------------------------------
 *
 * MoreCore --
 *
 *	Allocate more memory to the indicated bucket.
 *
 *	Assumes the allocator mutex is already held.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Attempts to get more memory from the system.
 *
 *----------------------------------------------------------------------
 */

static void
MoreCore(
    int bucket)			/* What bucket to allocate to. */
{
    register union overhead *overPtr;
    register long size;		/* size of desired block */
    long amount;		/* amount to allocate */
    int numBlocks;		/* how many blocks we get */
    struct block *blockPtr;

    /*
     * Compute the block size for this bucket. The assertion guards against
     * the (absurdly large) bucket values for which the shift would overflow
     * to a non-positive size.
     */

    size = 1 << (bucket + 3);
    ASSERT(size > 0);

    amount = MAXMALLOC;
    numBlocks = amount / size;
    ASSERT(numBlocks*size == amount);

    blockPtr = (struct block *) TclpSysAlloc((unsigned)
	    (sizeof(struct block) + amount), 1);
    /* no more room! */
    if (blockPtr == NULL) {
	return;
    }
    blockPtr->nextPtr = blockList;
    blockList = blockPtr;

    overPtr = (union overhead *) (blockPtr + 1);

    /*
     * Add the newly allocated memory to the free list for this hash bucket.
     */
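
    /*
     * Example (with an assumed MAXMALLOC of 16384): for the 64-byte bucket
     * this carves the new chunk into 16384 / 64 = 256 blocks and threads
     * them together through their "next" fields, leaving nextf[bucket]
     * pointing at the first one.
     */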

    nextf[bucket] = overPtr;
    while (--numBlocks > 0) {
	overPtr->next = (union overhead *)((caddr_t)overPtr + size);
	overPtr = (union overhead *)((caddr_t)overPtr + size);
    }
    overPtr->next = NULL;
}

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *	Free memory.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(
    char *oldPtr)		/* Pointer to memory to free. */
{
    register long size;
    register union overhead *overPtr;
    struct block *bigBlockPtr;

    if (oldPtr == NULL) {
	return;
    }

    Tcl_MutexLock(allocMutexPtr);
    overPtr = (union overhead *)((caddr_t)oldPtr - sizeof (union overhead));
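
    /*
     * The overhead header was placed immediately before the pointer handed
     * out by TclpAlloc(), so stepping back sizeof(union overhead) bytes
     * recovers it. Its magic bytes are checked below to catch frees of
     * pointers that did not come from this allocator (or were already
     * freed).
     */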

    ASSERT(overPtr->overMagic0 == MAGIC);	/* make sure it was in use */
    ASSERT(overPtr->overMagic1 == MAGIC);
    if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
	Tcl_MutexUnlock(allocMutexPtr);
	return;
    }

    RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
    RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
    size = overPtr->bucketIndex;
    if (size == 0xff) {
#ifdef MSTATS
	numMallocs[NBUCKETS]--;
#endif

	bigBlockPtr = (struct block *) overPtr - 1;
	bigBlockPtr->prevPtr->nextPtr = bigBlockPtr->nextPtr;
	bigBlockPtr->nextPtr->prevPtr = bigBlockPtr->prevPtr;
	TclpSysFree(bigBlockPtr);

	Tcl_MutexUnlock(allocMutexPtr);
	return;
    }
    ASSERT(size < NBUCKETS);
    overPtr->next = nextf[size];	/* also clobbers overMagic */
    nextf[size] = overPtr;

#ifdef MSTATS
    numMallocs[size]--;
#endif

    Tcl_MutexUnlock(allocMutexPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *	Reallocate memory.
 *
 * Results:
 *	Returns a pointer to the resized block (which may have moved), or
 *	NULL if the reallocation fails.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(
    char *oldPtr,		/* Pointer to alloced block. */
    unsigned int numBytes)	/* New size of memory. */
{
    int i;
    union overhead *overPtr;
    struct block *bigBlockPtr;
    int expensive;
    unsigned long maxSize;

    if (oldPtr == NULL) {
	return TclpAlloc(numBytes);
    }

    Tcl_MutexLock(allocMutexPtr);

    overPtr = (union overhead *)((caddr_t)oldPtr - sizeof (union overhead));

    ASSERT(overPtr->overMagic0 == MAGIC);	/* make sure it was in use */
    ASSERT(overPtr->overMagic1 == MAGIC);
    if (overPtr->overMagic0 != MAGIC || overPtr->overMagic1 != MAGIC) {
	Tcl_MutexUnlock(allocMutexPtr);
	return NULL;
    }

    RANGE_ASSERT(overPtr->rangeCheckMagic == RMAGIC);
    RANGE_ASSERT(BLOCK_END(overPtr) == RMAGIC);
    i = overPtr->bucketIndex;

    /*
     * If the block isn't in a bin, just realloc it.
     */

    if (i == 0xff) {
	struct block *prevPtr, *nextPtr;
	bigBlockPtr = (struct block *) overPtr - 1;
	prevPtr = bigBlockPtr->prevPtr;
	nextPtr = bigBlockPtr->nextPtr;
	bigBlockPtr = (struct block *) TclpSysRealloc(bigBlockPtr,
		sizeof(struct block) + OVERHEAD + numBytes);
	if (bigBlockPtr == NULL) {
	    Tcl_MutexUnlock(allocMutexPtr);
	    return NULL;
	}

	if (prevPtr->nextPtr != bigBlockPtr) {
	    /*
	     * If the block has moved, splice the new block into the list
	     * where the old block used to be.
	     */

	    prevPtr->nextPtr = bigBlockPtr;
	    nextPtr->prevPtr = bigBlockPtr;
	}

	overPtr = (union overhead *) (bigBlockPtr + 1);

#ifdef MSTATS
	numMallocs[NBUCKETS]++;
#endif

#ifdef RCHECK
	/*
	 * Record allocated size of block and update magic number bounds.
	 */

	overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
	BLOCK_END(overPtr) = RMAGIC;
#endif

	Tcl_MutexUnlock(allocMutexPtr);
	return (char *)(overPtr+1);
    }
    maxSize = 1 << (i+3);
    expensive = 0;
    if (numBytes+OVERHEAD > maxSize) {
	expensive = 1;
    } else if (i>0 && numBytes+OVERHEAD < maxSize/2) {
	expensive = 1;
    }
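
    /*
     * "Expensive" means the data has to move: either the new size no longer
     * fits in the current bucket, or it would leave more than half of the
     * bucket unused (in which case reallocating into a smaller bucket avoids
     * wasting memory). Otherwise the existing block is reused as-is below.
     */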

    if (expensive) {
	void *newPtr;

	Tcl_MutexUnlock(allocMutexPtr);

	newPtr = TclpAlloc(numBytes);
	if (newPtr == NULL) {
	    return NULL;
	}
	maxSize -= OVERHEAD;
	if (maxSize < numBytes) {
	    numBytes = maxSize;
	}
	memcpy(newPtr, oldPtr, (size_t) numBytes);
	TclpFree(oldPtr);
	return newPtr;
    }

    /*
     * Ok, we don't have to copy, it fits as-is.
     */

#ifdef RCHECK
    overPtr->realBlockSize = (numBytes + RSLOP - 1) & ~(RSLOP - 1);
    BLOCK_END(overPtr) = RMAGIC;
#endif

    Tcl_MutexUnlock(allocMutexPtr);
    return oldPtr;
}

/*
 *----------------------------------------------------------------------
 *
 * mstats --
 *
 *	Prints two lines of numbers, one showing the length of the free list
 *	for each size category, the second showing the number of mallocs -
 *	frees for each size category.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

#ifdef MSTATS
void
mstats(
    char *s)			/* Where to write info. */
{
    register int i, j;
    register union overhead *overPtr;
    int totalFree = 0, totalUsed = 0;

    Tcl_MutexLock(allocMutexPtr);

    fprintf(stderr, "Memory allocation statistics %s\nTclpFree:\t", s);
    for (i = 0; i < NBUCKETS; i++) {
	for (j=0, overPtr=nextf[i]; overPtr; overPtr=overPtr->next, j++) {
	    fprintf(stderr, " %d", j);
	}
	totalFree += j * (1 << (i + 3));
    }

    fprintf(stderr, "\nused:\t");
    for (i = 0; i < NBUCKETS; i++) {
	fprintf(stderr, " %u", numMallocs[i]);
	totalUsed += numMallocs[i] * (1 << (i + 3));
    }

    fprintf(stderr, "\n\tTotal small in use: %d, total free: %d\n",
	    totalUsed, totalFree);
    fprintf(stderr, "\n\tNumber of big (>%d) blocks in use: %u\n",
	    MAXMALLOC, numMallocs[NBUCKETS]);

    Tcl_MutexUnlock(allocMutexPtr);
}
#endif

#else	/* !USE_TCLALLOC */

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *	Allocate more memory.
 *
 * Results:
 *	Returns a pointer to the allocated block, or NULL if the allocation
 *	fails.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(
    unsigned int numBytes)	/* Number of bytes to allocate. */
{
    return (char *) malloc(numBytes);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *	Free memory.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(
    char *oldPtr)		/* Pointer to memory to free. */
{
    free(oldPtr);
    return;
}

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *	Reallocate memory.
 *
 * Results:
 *	Returns a pointer to the resized block (which may have moved), or
 *	NULL if the reallocation fails.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(
    char *oldPtr,		/* Pointer to alloced block. */
    unsigned int numBytes)	/* New size of memory. */
{
    return (char *) realloc(oldPtr, numBytes);
}

#endif /* !USE_TCLALLOC */
#endif /* !TCL_THREADS */

/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 4
 * fill-column: 78
 * End:
 */