/*
 * tclThreadAlloc.c --
 *
 *	This is a very fast storage allocator for use with threads (designed
 *	to avoid lock contention).  The basic strategy is to allocate memory
 *	in fixed-size blocks from block caches.
 *
 * The Initial Developer of the Original Code is America Online, Inc.
 * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution
 * of this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclThreadAlloc.c,v 1.4.2.8 2007/06/29 03:17:33 das Exp $
 */

#include "tclInt.h"

#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC) && !defined(TCL_MEM_DEBUG)

#ifdef WIN32
#include "tclWinInt.h"
#else
extern Tcl_Mutex *TclpNewAllocMutex(void);
extern void *TclpGetAllocCache(void);
extern void TclpSetAllocCache(void *);
#endif

/*
 * If range checking is enabled, an additional byte will be allocated
 * to store the magic number at the end of the requested memory.
 */

#ifndef RCHECK
#ifdef  NDEBUG
#define RCHECK		0
#else
#define RCHECK		1
#endif
#endif

/*
 * The following defines set the number of Tcl_Obj's to allocate/move
 * at a time and the high water mark at which a per-thread cache is
 * pruned.  On a 32 bit system, sizeof(Tcl_Obj) = 24 so 800 * 24 = ~19k.
 */

#define NOBJALLOC	 800
#define NOBJHIGH	1200

/*
 * Alignment for allocated memory.
 */

#if defined(__APPLE__)
#define ALLOCALIGN	16
#else
#define ALLOCALIGN	8
#endif

/*
 * The following union stores accounting information for
 * each block including two small magic numbers and
 * a bucket number when in use or a next pointer when
 * free.  The original requested size (not including
 * the Block overhead) is also maintained.
 */

typedef union Block {
    struct {
	union {
	    union Block *next;		/* Next in free list. */
	    struct {
		unsigned char magic1;	/* First magic number. */
		unsigned char bucket;	/* Bucket block allocated from. */
		unsigned char unused;	/* Padding. */
		unsigned char magic2;	/* Second magic number. */
	    } s;
	} u;
	size_t reqSize;			/* Requested allocation size. */
    } b;
    unsigned char padding[ALLOCALIGN];
} Block;
#define b_next		b.u.next
#define b_bucket	b.u.s.bucket
#define b_magic1	b.u.s.magic1
#define b_magic2	b.u.s.magic2
#define MAGIC		0xef
#define b_reqsize	b.reqSize

/*
 * The following defines the minimum and maximum block sizes and the number
 * of buckets in the bucket cache.
 */

#define MINALLOC	((sizeof(Block) + 8 + (ALLOCALIGN-1)) & ~(ALLOCALIGN-1))
#define NBUCKETS	(11 - (MINALLOC >> 5))
#define MAXALLOC	(MINALLOC << (NBUCKETS - 1))
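
/*
 * Worked example, assuming an LP64 build with ALLOCALIGN == 8 (so that
 * sizeof(Block) == 16); none of these exact values are guaranteed:
 *
 *	MINALLOC == (16 + 8 + 7) & ~7	== 24
 *	NBUCKETS == 11 - (24 >> 5)	== 11
 *	MAXALLOC == 24 << 10		== 24576
 *
 * yielding bucket block sizes of 24, 48, 96, ... 24576 bytes.
 */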

/*
 * The following structure defines a bucket of blocks with
 * various accounting and statistics information.
 */

typedef struct Bucket {
    Block *firstPtr;
    long nfree;
    long nget;
    long nput;
    long nwait;
    long nlock;
    long nrequest;
} Bucket;

/*
 * The following structure defines a cache of buckets and objs.
 */

typedef struct Cache {
    struct Cache  *nextPtr;
    Tcl_ThreadId   owner;
    Tcl_Obj       *firstObjPtr;
    int            nobjs;
    int	           nsysalloc;
    Bucket         buckets[NBUCKETS];
} Cache;

/*
 * The following array specifies various per-bucket
 * limits and locks.  The values are computed once, during
 * first-time initialization in GetCache() below, to avoid
 * calculating them repeatedly.
 */

struct binfo {
    size_t blocksize;	/* Bucket blocksize. */
    int maxblocks;	/* Max blocks before move to share. */
    int nmove;		/* Num blocks to move to share. */
    Tcl_Mutex *lockPtr; /* Share bucket lock. */
} binfo[NBUCKETS];
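
/*
 * For illustration, assuming MINALLOC == 24 and NBUCKETS == 11 as in the
 * worked example above, GetCache() fills this table in roughly as:
 *
 *	bucket	blocksize	maxblocks	nmove
 *	0	24		1024		512
 *	1	48		512		256
 *	...
 *	9	12288		2		1
 *	10	24576		1		1
 */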

/*
 * Static functions defined in this file.
 */

static void LockBucket(Cache *cachePtr, int bucket);
static void UnlockBucket(Cache *cachePtr, int bucket);
static void PutBlocks(Cache *cachePtr, int bucket, int nmove);
static int  GetBlocks(Cache *cachePtr, int bucket);
static Block *Ptr2Block(char *ptr);
static char *Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize);
static void MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove);

/*
 * Local variables defined in this file and initialized at
 * startup.
 */

static Tcl_Mutex *listLockPtr;
static Tcl_Mutex *objLockPtr;
static Cache     sharedCache;
static Cache    *sharedPtr = &sharedCache;
static Cache    *firstCachePtr = &sharedCache;


/*
 *----------------------------------------------------------------------
 *
 *  GetCache --
 *
 *	Gets per-thread memory cache, allocating it if necessary.
 *
 * Results:
 *	Pointer to cache.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static Cache *
GetCache(void)
{
    Cache *cachePtr;

    /*
     * Check for first-time initialization.
     */

    if (listLockPtr == NULL) {
	Tcl_Mutex *initLockPtr;
	unsigned int i;

	initLockPtr = Tcl_GetAllocMutex();
	Tcl_MutexLock(initLockPtr);
	if (listLockPtr == NULL) {
	    listLockPtr = TclpNewAllocMutex();
	    objLockPtr = TclpNewAllocMutex();
	    for (i = 0; i < NBUCKETS; ++i) {
		binfo[i].blocksize = MINALLOC << i;
		binfo[i].maxblocks = 1 << (NBUCKETS - 1 - i);
		binfo[i].nmove = i < NBUCKETS-1 ? 1 << (NBUCKETS - 2 - i) : 1;
		binfo[i].lockPtr = TclpNewAllocMutex();
	    }
	}
	Tcl_MutexUnlock(initLockPtr);
    }

    /*
     * Get this thread's cache, allocating if necessary.
     */

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
	cachePtr = calloc(1, sizeof(Cache));
	if (cachePtr == NULL) {
	    panic("alloc: could not allocate new cache");
	}
	Tcl_MutexLock(listLockPtr);
	cachePtr->nextPtr = firstCachePtr;
	firstCachePtr = cachePtr;
	Tcl_MutexUnlock(listLockPtr);
	cachePtr->owner = Tcl_GetCurrentThread();
	TclpSetAllocCache(cachePtr);
    }
    return cachePtr;
}


/*
 *----------------------------------------------------------------------
 *
 *  TclFreeAllocCache --
 *
 *	Flush and delete a cache, removing from list of caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFreeAllocCache(void *arg)
{
    Cache *cachePtr = arg;
    Cache **nextPtrPtr;
    register unsigned int bucket;

    /*
     * Flush blocks.
     */

    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
	if (cachePtr->buckets[bucket].nfree > 0) {
	    PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].nfree);
	}
    }

    /*
     * Flush objs.
     */

    if (cachePtr->nobjs > 0) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(cachePtr, sharedPtr, cachePtr->nobjs);
	Tcl_MutexUnlock(objLockPtr);
    }

    /*
     * Remove from pool list.
     */

    Tcl_MutexLock(listLockPtr);
    nextPtrPtr = &firstCachePtr;
    while (*nextPtrPtr != cachePtr) {
	nextPtrPtr = &(*nextPtrPtr)->nextPtr;
    }
    *nextPtrPtr = cachePtr->nextPtr;
    cachePtr->nextPtr = NULL;
    Tcl_MutexUnlock(listLockPtr);
    free(cachePtr);
}


/*
 *----------------------------------------------------------------------
 *
 *  TclpAlloc --
 *
 *	Allocate memory.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	May allocate more blocks for a bucket.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(unsigned int reqsize)
{
    Cache         *cachePtr;
    Block         *blockPtr;
    register int   bucket;
    size_t         size;

    if (sizeof(int) >= sizeof(size_t)) {
	/* An unsigned int overflow can also be a size_t overflow */
	const size_t zero = 0;
	const size_t max = ~zero;

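	/*
	 * E.g. on an ILP32 build, a reqsize within sizeof(Block) + RCHECK
	 * of UINT_MAX would make the size computation below wrap around,
	 * so reject it here.
	 */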
	if (((size_t) reqsize) > max - sizeof(Block) - RCHECK) {
	    /* Requested allocation exceeds memory */
	    return NULL;
	}
    }

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Increment the requested size to include room for
     * the Block structure.  Call malloc() directly if the
     * required amount is greater than the largest block,
     * otherwise pop the smallest block large enough,
     * allocating more blocks if necessary.
     */

    blockPtr = NULL;
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    if (size > MAXALLOC) {
	bucket = NBUCKETS;
	blockPtr = malloc(size);
	if (blockPtr != NULL) {
	    cachePtr->nsysalloc += reqsize;
	}
    } else {
	bucket = 0;
	while (binfo[bucket].blocksize < size) {
	    ++bucket;
	}
	if (cachePtr->buckets[bucket].nfree || GetBlocks(cachePtr, bucket)) {
	    blockPtr = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr->b_next;
	    --cachePtr->buckets[bucket].nfree;
	    ++cachePtr->buckets[bucket].nget;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	}
    }
    if (blockPtr == NULL) {
	return NULL;
    }
    return Block2Ptr(blockPtr, bucket, reqsize);
}


/*
 *----------------------------------------------------------------------
 *
 *  TclpFree --
 *
 *	Return blocks to the thread block cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move blocks to shared cache.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(char *ptr)
{
    if (ptr != NULL) {
	Cache  *cachePtr = TclpGetAllocCache();
	Block *blockPtr;
	int bucket;

	if (cachePtr == NULL) {
	    cachePtr = GetCache();
	}

	/*
	 * Get the block back from the user pointer and
	 * call system free directly for large blocks.
	 * Otherwise, push the block back on the bucket and
	 * move blocks to the shared cache if there are now
	 * too many free.
	 */

	blockPtr = Ptr2Block(ptr);
	bucket = blockPtr->b_bucket;
	if (bucket == NBUCKETS) {
	    cachePtr->nsysalloc -= blockPtr->b_reqsize;
	    free(blockPtr);
	} else {
	    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	    blockPtr->b_next = cachePtr->buckets[bucket].firstPtr;
	    cachePtr->buckets[bucket].firstPtr = blockPtr;
	    ++cachePtr->buckets[bucket].nfree;
	    ++cachePtr->buckets[bucket].nput;
	    if (cachePtr != sharedPtr &&
		    cachePtr->buckets[bucket].nfree > binfo[bucket].maxblocks) {
		PutBlocks(cachePtr, bucket, binfo[bucket].nmove);
	    }
	}
    }
}


/*
 *----------------------------------------------------------------------
 *
 *  TclpRealloc --
 *
 *	Re-allocate memory to a larger or smaller size.
 *
 * Results:
 *	Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *	Previous memory, if any, may be freed.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(char *ptr, unsigned int reqsize)
{
    Cache *cachePtr;
    Block *blockPtr;
    void *new;
    size_t size, min;
    int bucket;

    if (ptr == NULL) {
	return TclpAlloc(reqsize);
    }

    if (sizeof(int) >= sizeof(size_t)) {
	/* An unsigned int overflow can also be a size_t overflow */
	const size_t zero = 0;
	const size_t max = ~zero;

	if (((size_t) reqsize) > max - sizeof(Block) - RCHECK) {
	    /* Requested allocation exceeds memory */
	    return NULL;
	}
    }

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * If the block is not a system block and fits in place,
     * simply return the existing pointer.  Otherwise, if the block
     * is a system block and the new size would also require a system
     * block, call realloc() directly.
     */

    blockPtr = Ptr2Block(ptr);
    size = reqsize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    bucket = blockPtr->b_bucket;
    if (bucket != NBUCKETS) {
	if (bucket > 0) {
	    min = binfo[bucket-1].blocksize;
	} else {
	    min = 0;
	}
	if (size > min && size <= binfo[bucket].blocksize) {
	    cachePtr->buckets[bucket].nrequest -= blockPtr->b_reqsize;
	    cachePtr->buckets[bucket].nrequest += reqsize;
	    return Block2Ptr(blockPtr, bucket, reqsize);
	}
    } else if (size > MAXALLOC) {
	cachePtr->nsysalloc -= blockPtr->b_reqsize;
	cachePtr->nsysalloc += reqsize;
	blockPtr = realloc(blockPtr, size);
	if (blockPtr == NULL) {
	    return NULL;
	}
	return Block2Ptr(blockPtr, NBUCKETS, reqsize);
    }

    /*
     * Finally, perform an expensive malloc/copy/free.
     */

    new = TclpAlloc(reqsize);
    if (new != NULL) {
	if (reqsize > blockPtr->b_reqsize) {
	    reqsize = blockPtr->b_reqsize;
	}
	memcpy(new, ptr, reqsize);
	TclpFree(ptr);
    }
    return new;
}
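
/*
 * Illustrative (non-compiled) round trip through the three entry points
 * above; ExampleRoundTrip is a hypothetical helper, and in practice these
 * functions are reached via ckalloc()/ckrealloc()/ckfree() rather than
 * called directly.  Error checking is omitted for brevity.
 */

#if 0
static void
ExampleRoundTrip(void)
{
    char *ptr;

    ptr = TclpAlloc(100);	 /* Popped from the smallest fitting bucket. */
    ptr = TclpRealloc(ptr, 120); /* May be satisfied in place if 120 plus
				  * overhead still fits the same bucket. */
    TclpFree(ptr);		 /* Pushed back on this thread's bucket. */
}
#endif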


/*
 *----------------------------------------------------------------------
 *
 * TclThreadAllocObj --
 *
 *	Allocate a Tcl_Obj from the per-thread cache.
 *
 * Results:
 *	Pointer to uninitialized Tcl_Obj.
 *
 * Side effects:
 *	May move Tcl_Obj's from shared cache or allocate new Tcl_Obj's
 *	if list is empty.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register int nmove;
    register Tcl_Obj *objPtr;
    Tcl_Obj *newObjsPtr;

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Get this thread's obj list structure and move
     * or allocate new objs if necessary.
     */

    if (cachePtr->nobjs == 0) {
	Tcl_MutexLock(objLockPtr);
	nmove = sharedPtr->nobjs;
	if (nmove > 0) {
	    if (nmove > NOBJALLOC) {
		nmove = NOBJALLOC;
	    }
	    MoveObjs(sharedPtr, cachePtr, nmove);
	}
	Tcl_MutexUnlock(objLockPtr);
	if (cachePtr->nobjs == 0) {
	    cachePtr->nobjs = nmove = NOBJALLOC;
	    newObjsPtr = malloc(sizeof(Tcl_Obj) * nmove);
	    if (newObjsPtr == NULL) {
		panic("alloc: could not allocate %d new objects", nmove);
	    }
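	    /*
	     * Link the fresh objs into this thread's free list, reusing
	     * the internalRep.otherValuePtr field as the link pointer.
	     */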
	    while (--nmove >= 0) {
		objPtr = &newObjsPtr[nmove];
		objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
		cachePtr->firstObjPtr = objPtr;
	    }
	}
    }

    /*
     * Pop the first object.
     */

    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->nobjs;
    return objPtr;
}


/*
 *----------------------------------------------------------------------
 *
 * TclThreadFreeObj --
 *
 *	Return a free Tcl_Obj to the per-thread cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	May move free Tcl_Obj's to shared list upon hitting high
 *	water mark.
 *
 *----------------------------------------------------------------------
 */

void
TclThreadFreeObj(Tcl_Obj *objPtr)
{
    Cache *cachePtr = TclpGetAllocCache();

    if (cachePtr == NULL) {
	cachePtr = GetCache();
    }

    /*
     * Get this thread's list and push on the free Tcl_Obj.
     */

    objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr;
    ++cachePtr->nobjs;

    /*
     * If the number of free objects has exceeded the high
     * water mark, move some objs to the shared list.
     */
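
    /*
     * With the default settings, this trims a cache that has just
     * exceeded NOBJHIGH (1200) back down to about 400 objects.
     */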

    if (cachePtr->nobjs > NOBJHIGH) {
	Tcl_MutexLock(objLockPtr);
	MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
	Tcl_MutexUnlock(objLockPtr);
    }
}


/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetMemoryInfo --
 *
 *	Return a list-of-lists of memory stats.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	List appended to given dstring.
 *
 *----------------------------------------------------------------------
 */
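
/*
 * Each sublist starts with the cache name and is followed by one element
 * per bucket of the form "blocksize nfree nget nput nrequest nlock nwait",
 * e.g. (illustrative values only):
 *
 *	{shared {24 0 0 0 0 0 0} {48 0 0 0 0 0 0} ...} {thread1 ...}
 */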

void
Tcl_GetMemoryInfo(Tcl_DString *dsPtr)
{
    Cache *cachePtr;
    char buf[200];
    unsigned int n;

    Tcl_MutexLock(listLockPtr);
    cachePtr = firstCachePtr;
    while (cachePtr != NULL) {
	Tcl_DStringStartSublist(dsPtr);
	if (cachePtr == sharedPtr) {
	    Tcl_DStringAppendElement(dsPtr, "shared");
	} else {
	    sprintf(buf, "thread%d", (int) cachePtr->owner);
	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	for (n = 0; n < NBUCKETS; ++n) {
	    sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
		(unsigned long) binfo[n].blocksize,
		cachePtr->buckets[n].nfree,
		cachePtr->buckets[n].nget,
		cachePtr->buckets[n].nput,
		cachePtr->buckets[n].nrequest,
		cachePtr->buckets[n].nlock,
		cachePtr->buckets[n].nwait);
	    Tcl_DStringAppendElement(dsPtr, buf);
	}
	Tcl_DStringEndSublist(dsPtr);
	cachePtr = cachePtr->nextPtr;
    }
    Tcl_MutexUnlock(listLockPtr);
}


/*
 *----------------------------------------------------------------------
 *
 * MoveObjs --
 *
 *	Move Tcl_Obj's between caches.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
MoveObjs(Cache *fromPtr, Cache *toPtr, int nmove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->nobjs += nmove;
    fromPtr->nobjs -= nmove;

    /*
     * Find the last object to be moved; set the next one
     * (the first one not to be moved) as the first object
     * in the 'from' cache.
     */

    while (--nmove) {
	objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /*
     * Move all objects as a block - they are already linked to
     * each other, we just have to update the first and last.
     */

    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}


/*
 *----------------------------------------------------------------------
 *
 *  Block2Ptr, Ptr2Block --
 *
 *	Convert between internal blocks and user pointers.
 *
 * Results:
 *	User pointer or internal block.
 *
 * Side effects:
 *	Invalid blocks will abort the process.
 *
 *----------------------------------------------------------------------
 */
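
/*
 * Illustrative layout of an allocated block (with RCHECK enabled):
 *
 *	| Block header | reqsize bytes of user memory | MAGIC byte |
 *	^ blockPtr     ^ pointer returned to the caller
 */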

static char *
Block2Ptr(Block *blockPtr, int bucket, unsigned int reqsize)
{
    register void *ptr;

    blockPtr->b_magic1 = blockPtr->b_magic2 = MAGIC;
    blockPtr->b_bucket = bucket;
    blockPtr->b_reqsize = reqsize;
    ptr = ((void *) (blockPtr + 1));
#if RCHECK
    ((unsigned char *)(ptr))[reqsize] = MAGIC;
#endif
    return (char *) ptr;
}

static Block *
Ptr2Block(char *ptr)
{
    register Block *blockPtr;

    blockPtr = (((Block *) ptr) - 1);
    if (blockPtr->b_magic1 != MAGIC
#if RCHECK
	|| ((unsigned char *) ptr)[blockPtr->b_reqsize] != MAGIC
#endif
	|| blockPtr->b_magic2 != MAGIC) {
	panic("alloc: invalid block: %p: %x %x %x\n",
	    blockPtr, blockPtr->b_magic1, blockPtr->b_magic2,
	    ((unsigned char *) ptr)[blockPtr->b_reqsize]);
    }
    return blockPtr;
}


/*
 *----------------------------------------------------------------------
 *
 *  LockBucket, UnlockBucket --
 *
 *	Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Lock activity and contention are monitored globally and on
 *	a per-cache basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(Cache *cachePtr, int bucket)
{
#if 0
    if (Tcl_MutexTryLock(binfo[bucket].lockPtr) != TCL_OK) {
	Tcl_MutexLock(binfo[bucket].lockPtr);
	++cachePtr->buckets[bucket].nwait;
	++sharedPtr->buckets[bucket].nwait;
    }
#else
    Tcl_MutexLock(binfo[bucket].lockPtr);
#endif
    ++cachePtr->buckets[bucket].nlock;
    ++sharedPtr->buckets[bucket].nlock;
}


static void
UnlockBucket(Cache *cachePtr, int bucket)
{
    Tcl_MutexUnlock(binfo[bucket].lockPtr);
}


/*
 *----------------------------------------------------------------------
 *
 *  PutBlocks --
 *
 *	Return unused blocks to the shared cache.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(Cache *cachePtr, int bucket, int nmove)
{
    register Block *lastPtr, *firstPtr;
    register int n = nmove;

    /*
     * Before acquiring the lock, walk the block list to find
     * the last block to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
	lastPtr = lastPtr->b_next;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->b_next;
    cachePtr->buckets[bucket].nfree -= nmove;

    /*
     * Acquire the lock and place the list of blocks at the front
     * of the shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->b_next = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].nfree += nmove;
    UnlockBucket(cachePtr, bucket);
}


/*
 *----------------------------------------------------------------------
 *
 *  GetBlocks --
 *
 *	Get more blocks for a bucket.
 *
 * Results:
 *	1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *	Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(Cache *cachePtr, int bucket)
{
    register Block *blockPtr;
    register int n;
    register size_t size;

    /*
     * First, attempt to move blocks from the shared cache.  Note
     * the potentially dirty read of nfree before acquiring the lock
     * which is a slight performance enhancement.  The value is
     * verified after the lock is actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].nfree > 0) {
	LockBucket(cachePtr, bucket);
	if (sharedPtr->buckets[bucket].nfree > 0) {

	    /*
	     * Either move the entire list or walk the list to find
	     * the last block to move.
	     */

	    n = binfo[bucket].nmove;
	    if (n >= sharedPtr->buckets[bucket].nfree) {
		cachePtr->buckets[bucket].firstPtr =
		    sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].nfree =
		    sharedPtr->buckets[bucket].nfree;
		sharedPtr->buckets[bucket].firstPtr = NULL;
		sharedPtr->buckets[bucket].nfree = 0;
	    } else {
		blockPtr = sharedPtr->buckets[bucket].firstPtr;
		cachePtr->buckets[bucket].firstPtr = blockPtr;
		sharedPtr->buckets[bucket].nfree -= n;
		cachePtr->buckets[bucket].nfree = n;
		while (--n > 0) {
		    blockPtr = blockPtr->b_next;
		}
		sharedPtr->buckets[bucket].firstPtr = blockPtr->b_next;
		blockPtr->b_next = NULL;
	    }
	}
	UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].nfree == 0) {

	/*
	 * If no blocks could be moved from shared, first look for a
	 * larger block in this cache to split up.
	 */

	blockPtr = NULL;
	n = NBUCKETS;
	size = 0; /* lint */
	while (--n > bucket) {
	    if (cachePtr->buckets[n].nfree > 0) {
		size = binfo[n].blocksize;
		blockPtr = cachePtr->buckets[n].firstPtr;
		cachePtr->buckets[n].firstPtr = blockPtr->b_next;
		--cachePtr->buckets[n].nfree;
		break;
	    }
	}

	/*
	 * Otherwise, allocate a big new block directly.
	 */

	if (blockPtr == NULL) {
	    size = MAXALLOC;
	    blockPtr = malloc(size);
	    if (blockPtr == NULL) {
		return 0;
	    }
	}

	/*
	 * Split the larger block into smaller blocks for this bucket.
	 */
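	/*
	 * E.g. (assuming MINALLOC == 24 as in the example above): a fresh
	 * MAXALLOC (24576 byte) chunk yields 1024 blocks for bucket 0,
	 * 512 for bucket 1, and so on.
	 */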

	n = size / binfo[bucket].blocksize;
	cachePtr->buckets[bucket].nfree = n;
	cachePtr->buckets[bucket].firstPtr = blockPtr;
	while (--n > 0) {
	    blockPtr->b_next = (Block *)
		((char *) blockPtr + binfo[bucket].blocksize);
	    blockPtr = blockPtr->b_next;
	}
	blockPtr->b_next = NULL;
    }
    return 1;
}

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc()
{
    unsigned int i;

    for (i = 0; i < NBUCKETS; ++i) {
	TclpFreeAllocMutex(binfo[i].lockPtr);
	binfo[i].lockPtr = NULL;
    }

    TclpFreeAllocMutex(objLockPtr);
    objLockPtr = NULL;

    TclpFreeAllocMutex(listLockPtr);
    listLockPtr = NULL;

    TclpFreeAllocCache(NULL);
}

#else /* !(TCL_THREADS && USE_THREAD_ALLOC) || TCL_MEM_DEBUG */

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *	This procedure is used to destroy all private resources used in
 *	this file.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc()
{
    Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use.");
}

#endif /* TCL_THREADS && USE_THREAD_ALLOC && !TCL_MEM_DEBUG */
