
source: downloads/tcl8.5.2/generic/tclThreadAlloc.c @ 63

Last change on this file since 63 was 25, checked in by landauf, 16 years ago

added tcl to libs

File size: 24.3 KB
/*
 * tclThreadAlloc.c --
 *
 *      This is a very fast storage allocator for use with threads (designed
 *      to avoid lock contention). The basic strategy is to allocate memory in
 *      fixed size blocks from block caches.
 *
 * The Initial Developer of the Original Code is America Online, Inc.
 * Portions created by AOL are Copyright (C) 1999 America Online, Inc.
 *
 * See the file "license.terms" for information on usage and redistribution of
 * this file, and for a DISCLAIMER OF ALL WARRANTIES.
 *
 * RCS: @(#) $Id: tclThreadAlloc.c,v 1.27 2008/03/20 09:49:16 dkf Exp $
 */

#include "tclInt.h"
#if defined(TCL_THREADS) && defined(USE_THREAD_ALLOC)

/*
 * If range checking is enabled, an additional byte will be allocated to store
 * the magic number at the end of the requested memory.
 */

#ifndef RCHECK
#ifdef  NDEBUG
#define RCHECK          0
#else
#define RCHECK          1
#endif
#endif

/*
 * The following define the number of Tcl_Obj's to allocate/move at a time and
 * the high water mark to prune a per-thread cache. On a 32 bit system,
 * sizeof(Tcl_Obj) = 24 so 800 * 24 = ~19k.
 */

#define NOBJALLOC       800
#define NOBJHIGH        1200

/*
 * The following union stores accounting information for each block including
 * two small magic numbers and a bucket number when in use or a next pointer
 * when free. The original requested size (not including the Block overhead)
 * is also maintained.
 */

typedef union Block {
    struct {
        union {
            union Block *next;          /* Next in free list. */
            struct {
                unsigned char magic1;   /* First magic number. */
                unsigned char bucket;   /* Bucket block allocated from. */
                unsigned char unused;   /* Padding. */
                unsigned char magic2;   /* Second magic number. */
            } s;
        } u;
        size_t reqSize;                 /* Requested allocation size. */
    } b;
    unsigned char padding[TCL_ALLOCALIGN];
} Block;
#define nextBlock       b.u.next
#define sourceBucket    b.u.s.bucket
#define magicNum1       b.u.s.magic1
#define magicNum2       b.u.s.magic2
#define MAGIC           0xEF
#define blockReqSize    b.reqSize

/*
 * The following defines the minimum and maximum block sizes and the number
 * of buckets in the bucket cache.
 */

#define MINALLOC        ((sizeof(Block) + 8 + (TCL_ALLOCALIGN-1)) & ~(TCL_ALLOCALIGN-1))
#define NBUCKETS        (11 - (MINALLOC >> 5))
#define MAXALLOC        (MINALLOC << (NBUCKETS - 1))
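
/*
 * Worked example of the three macros above, assuming a 64-bit build where
 * TCL_ALLOCALIGN is 16 and sizeof(Block) is therefore 16 (both values are
 * platform-dependent):
 *
 *     MINALLOC = (16 + 8 + 15) & ~15 = 32
 *     NBUCKETS = 11 - (32 >> 5)      = 10
 *     MAXALLOC = 32 << 9             = 16384
 *
 * giving bucket block sizes of 32, 64, 128, ..., 16384 bytes.
 */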

/*
 * The following structure defines a bucket of blocks with various accounting
 * and statistics information.
 */

typedef struct Bucket {
    Block *firstPtr;            /* First block available */
    long numFree;               /* Number of blocks available */

    /* All fields below for accounting only */

    long numRemoves;            /* Number of removes from bucket */
    long numInserts;            /* Number of inserts into bucket */
    long numWaits;              /* Number of waits to acquire a lock */
    long numLocks;              /* Number of locks acquired */
    long totalAssigned;         /* Total space assigned to bucket */
} Bucket;

/*
 * The following structure defines a cache of buckets and objs, of which there
 * will be (at most) one per thread.
 */

typedef struct Cache {
    struct Cache *nextPtr;      /* Linked list of cache entries */
    Tcl_ThreadId owner;         /* Which thread's cache is this? */
    Tcl_Obj *firstObjPtr;       /* List of free objects for thread */
    int numObjects;             /* Number of objects for thread */
    int totalAssigned;          /* Total space assigned to thread */
    Bucket buckets[NBUCKETS];   /* The buckets for this thread */
} Cache;

/*
 * The following array specifies various per-bucket limits and locks. The
 * values are initialized once, at first use in GetCache, to avoid
 * calculating them repeatedly.
 */

static struct {
    size_t blockSize;           /* Bucket blocksize. */
    int maxBlocks;              /* Max blocks before move to share. */
    int numMove;                /* Num blocks to move to share. */
    Tcl_Mutex *lockPtr;         /* Share bucket lock. */
} bucketInfo[NBUCKETS];

/*
 * Static functions defined in this file.
 */

static Cache *  GetCache(void);
static void     LockBucket(Cache *cachePtr, int bucket);
static void     UnlockBucket(Cache *cachePtr, int bucket);
static void     PutBlocks(Cache *cachePtr, int bucket, int numMove);
static int      GetBlocks(Cache *cachePtr, int bucket);
static Block *  Ptr2Block(char *ptr);
static char *   Block2Ptr(Block *blockPtr, int bucket, unsigned int reqSize);
static void     MoveObjs(Cache *fromPtr, Cache *toPtr, int numMove);

/*
 * Local variables defined in this file and initialized at startup.
 */

static Tcl_Mutex *listLockPtr;
static Tcl_Mutex *objLockPtr;
static Cache sharedCache;
static Cache *sharedPtr = &sharedCache;
static Cache *firstCachePtr = &sharedCache;
/*
 *----------------------------------------------------------------------
 *
 * GetCache --
 *
 *      Gets per-thread memory cache, allocating it if necessary.
 *
 * Results:
 *      Pointer to cache.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static Cache *
GetCache(void)
{
    Cache *cachePtr;

    /*
     * Check for first-time initialization.
     */

    if (listLockPtr == NULL) {
        Tcl_Mutex *initLockPtr;
        unsigned int i;

        initLockPtr = Tcl_GetAllocMutex();
        Tcl_MutexLock(initLockPtr);
        if (listLockPtr == NULL) {
            listLockPtr = TclpNewAllocMutex();
            objLockPtr = TclpNewAllocMutex();
            for (i = 0; i < NBUCKETS; ++i) {
                bucketInfo[i].blockSize = MINALLOC << i;
                bucketInfo[i].maxBlocks = 1 << (NBUCKETS - 1 - i);
                bucketInfo[i].numMove = i < NBUCKETS - 1 ?
                        1 << (NBUCKETS - 2 - i) : 1;
                bucketInfo[i].lockPtr = TclpNewAllocMutex();
            }
        }
        Tcl_MutexUnlock(initLockPtr);
    }

    /*
     * Get this thread's cache, allocating if necessary.
     */

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
        cachePtr = calloc(1, sizeof(Cache));
        if (cachePtr == NULL) {
            Tcl_Panic("alloc: could not allocate new cache");
        }
        Tcl_MutexLock(listLockPtr);
        cachePtr->nextPtr = firstCachePtr;
        firstCachePtr = cachePtr;
        Tcl_MutexUnlock(listLockPtr);
        cachePtr->owner = Tcl_GetCurrentThread();
        TclpSetAllocCache(cachePtr);
    }
    return cachePtr;
}
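
/*
 * Sketch of the bucketInfo table the loop above builds, under the same
 * assumptions as the worked example near MAXALLOC (MINALLOC = 32,
 * NBUCKETS = 10):
 *
 *     bucket   blockSize   maxBlocks   numMove
 *          0          32         512       256
 *          1          64         256       128
 *        ...         ...         ...       ...
 *          8        8192           2         1
 *          9       16384           1         1
 *
 * Note the invariant maxBlocks * blockSize == MAXALLOC: every bucket
 * caches at most 16 KB of free blocks before TclpFree() starts migrating
 * numMove (half) of them to the shared cache.
 */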

/*
 *----------------------------------------------------------------------
 *
 * TclFreeAllocCache --
 *
 *      Flush and delete a cache, removing it from the list of caches.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
TclFreeAllocCache(
    void *arg)
{
    Cache *cachePtr = arg;
    Cache **nextPtrPtr;
    register unsigned int bucket;

    /*
     * Flush blocks.
     */

    for (bucket = 0; bucket < NBUCKETS; ++bucket) {
        if (cachePtr->buckets[bucket].numFree > 0) {
            PutBlocks(cachePtr, bucket, cachePtr->buckets[bucket].numFree);
        }
    }

    /*
     * Flush objs.
     */

    if (cachePtr->numObjects > 0) {
        Tcl_MutexLock(objLockPtr);
        MoveObjs(cachePtr, sharedPtr, cachePtr->numObjects);
        Tcl_MutexUnlock(objLockPtr);
    }

    /*
     * Remove from pool list.
     */

    Tcl_MutexLock(listLockPtr);
    nextPtrPtr = &firstCachePtr;
    while (*nextPtrPtr != cachePtr) {
        nextPtrPtr = &(*nextPtrPtr)->nextPtr;
    }
    *nextPtrPtr = cachePtr->nextPtr;
    cachePtr->nextPtr = NULL;
    Tcl_MutexUnlock(listLockPtr);
    free(cachePtr);
}

/*
 *----------------------------------------------------------------------
 *
 * TclpAlloc --
 *
 *      Allocate memory.
 *
 * Results:
 *      Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *      May allocate more blocks for a bucket.
 *
 *----------------------------------------------------------------------
 */

char *
TclpAlloc(
    unsigned int reqSize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    register int bucket;
    size_t size;

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * Increment the requested size to include room for the Block structure.
     * Call malloc() directly if the required amount is greater than the
     * largest block, otherwise pop the smallest block large enough,
     * allocating more blocks if necessary.
     */

    blockPtr = NULL;
    size = reqSize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    if (size > MAXALLOC) {
        bucket = NBUCKETS;
        blockPtr = malloc(size);
        if (blockPtr != NULL) {
            cachePtr->totalAssigned += reqSize;
        }
    } else {
        bucket = 0;
        while (bucketInfo[bucket].blockSize < size) {
            ++bucket;
        }
        if (cachePtr->buckets[bucket].numFree || GetBlocks(cachePtr, bucket)) {
            blockPtr = cachePtr->buckets[bucket].firstPtr;
            cachePtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
            --cachePtr->buckets[bucket].numFree;
            ++cachePtr->buckets[bucket].numRemoves;
            cachePtr->buckets[bucket].totalAssigned += reqSize;
        }
    }
    if (blockPtr == NULL) {
        return NULL;
    }
    return Block2Ptr(blockPtr, bucket, reqSize);
}
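
/*
 * Worked example of the bucket search above, using the 64-bit constants
 * from the earlier sketches (sizeof(Block) = 16, bucket sizes 32, 64,
 * 128, ...): TclpAlloc(100) computes size = 100 + 16 = 116 (117 with the
 * RCHECK guard byte) and pops a block from bucket 2 (blockSize 128), the
 * smallest bucket whose block size is >= size. Requests with
 * size > MAXALLOC bypass the buckets entirely and go straight to malloc().
 */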

/*
 *----------------------------------------------------------------------
 *
 * TclpFree --
 *
 *      Return blocks to the thread block cache.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May move blocks to shared cache.
 *
 *----------------------------------------------------------------------
 */

void
TclpFree(
    char *ptr)
{
    Cache *cachePtr;
    Block *blockPtr;
    int bucket;

    if (ptr == NULL) {
        return;
    }

    cachePtr = TclpGetAllocCache();
    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * Get the block back from the user pointer and call system free directly
     * for large blocks. Otherwise, push the block back on the bucket and move
     * blocks to the shared cache if there are now too many free.
     */

    blockPtr = Ptr2Block(ptr);
    bucket = blockPtr->sourceBucket;
    if (bucket == NBUCKETS) {
        cachePtr->totalAssigned -= blockPtr->blockReqSize;
        free(blockPtr);
        return;
    }

    cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
    blockPtr->nextBlock = cachePtr->buckets[bucket].firstPtr;
    cachePtr->buckets[bucket].firstPtr = blockPtr;
    ++cachePtr->buckets[bucket].numFree;
    ++cachePtr->buckets[bucket].numInserts;

    if (cachePtr != sharedPtr &&
            cachePtr->buckets[bucket].numFree > bucketInfo[bucket].maxBlocks) {
        PutBlocks(cachePtr, bucket, bucketInfo[bucket].numMove);
    }
}
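
/*
 * Continuing the example bucketInfo table sketched near GetCache: for
 * bucket 0 (blockSize 32, maxBlocks 512, numMove 256), the 513th block
 * freed into a thread's cache pushes numFree past maxBlocks, so
 * PutBlocks() splices 256 blocks onto the shared cache in one locked
 * operation, halving the local free list.
 */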

/*
 *----------------------------------------------------------------------
 *
 * TclpRealloc --
 *
 *      Re-allocate memory to a larger or smaller size.
 *
 * Results:
 *      Pointer to memory just beyond Block pointer.
 *
 * Side effects:
 *      Previous memory, if any, may be freed.
 *
 *----------------------------------------------------------------------
 */

char *
TclpRealloc(
    char *ptr,
    unsigned int reqSize)
{
    Cache *cachePtr = TclpGetAllocCache();
    Block *blockPtr;
    void *newPtr;
    size_t size, min;
    int bucket;

    if (ptr == NULL) {
        return TclpAlloc(reqSize);
    }

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * If the block is not a system block and fits in place, simply return the
     * existing pointer. Otherwise, if the block is a system block and the new
     * size would also require a system block, call realloc() directly.
     */

    blockPtr = Ptr2Block(ptr);
    size = reqSize + sizeof(Block);
#if RCHECK
    ++size;
#endif
    bucket = blockPtr->sourceBucket;
    if (bucket != NBUCKETS) {
        if (bucket > 0) {
            min = bucketInfo[bucket-1].blockSize;
        } else {
            min = 0;
        }
        if (size > min && size <= bucketInfo[bucket].blockSize) {
            cachePtr->buckets[bucket].totalAssigned -= blockPtr->blockReqSize;
            cachePtr->buckets[bucket].totalAssigned += reqSize;
            return Block2Ptr(blockPtr, bucket, reqSize);
        }
    } else if (size > MAXALLOC) {
        cachePtr->totalAssigned -= blockPtr->blockReqSize;
        cachePtr->totalAssigned += reqSize;
        blockPtr = realloc(blockPtr, size);
        if (blockPtr == NULL) {
            return NULL;
        }
        return Block2Ptr(blockPtr, NBUCKETS, reqSize);
    }

    /*
     * Finally, perform an expensive malloc/copy/free.
     */

    newPtr = TclpAlloc(reqSize);
    if (newPtr != NULL) {
        if (reqSize > blockPtr->blockReqSize) {
            reqSize = blockPtr->blockReqSize;
        }
        memcpy(newPtr, ptr, reqSize);
        TclpFree(ptr);
    }
    return newPtr;
}
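
/*
 * Example of the fit-in-place test above, using the bucket sizes from the
 * earlier sketches: a block that came from bucket 2 (blockSize 128,
 * previous bucket's blockSize 64) is reused as-is whenever the adjusted
 * size satisfies 64 < size <= 128; anything outside that window takes the
 * malloc/copy/free path instead.
 */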

/*
 *----------------------------------------------------------------------
 *
 * TclThreadAllocObj --
 *
 *      Allocate a Tcl_Obj from the per-thread cache.
 *
 * Results:
 *      Pointer to uninitialized Tcl_Obj.
 *
 * Side effects:
 *      May move Tcl_Obj's from the shared cache or allocate new Tcl_Obj's
 *      if the list is empty.
 *
 *----------------------------------------------------------------------
 */

Tcl_Obj *
TclThreadAllocObj(void)
{
    register Cache *cachePtr = TclpGetAllocCache();
    register Tcl_Obj *objPtr;

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * Get this thread's obj list structure and move or allocate new objs if
     * necessary.
     */

    if (cachePtr->numObjects == 0) {
        register int numMove;

        Tcl_MutexLock(objLockPtr);
        numMove = sharedPtr->numObjects;
        if (numMove > 0) {
            if (numMove > NOBJALLOC) {
                numMove = NOBJALLOC;
            }
            MoveObjs(sharedPtr, cachePtr, numMove);
        }
        Tcl_MutexUnlock(objLockPtr);
        if (cachePtr->numObjects == 0) {
            Tcl_Obj *newObjsPtr;

            cachePtr->numObjects = numMove = NOBJALLOC;
            newObjsPtr = malloc(sizeof(Tcl_Obj) * numMove);
            if (newObjsPtr == NULL) {
                Tcl_Panic("alloc: could not allocate %d new objects", numMove);
            }
            while (--numMove >= 0) {
                objPtr = &newObjsPtr[numMove];
                objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
                cachePtr->firstObjPtr = objPtr;
            }
        }
    }

    /*
     * Pop the first object.
     */

    objPtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr->internalRep.otherValuePtr;
    --cachePtr->numObjects;
    return objPtr;
}
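
/*
 * A note on the free list manipulated above: a free Tcl_Obj's
 * internalRep.otherValuePtr field is reused as the "next" link, so the
 * cache needs no auxiliary storage. After allocating a fresh batch of
 * NOBJALLOC objects, for example, the loop above leaves the list as
 *
 *     newObjsPtr[0] -> newObjsPtr[1] -> ... -> newObjsPtr[NOBJALLOC-1]
 *
 * with the last entry pointing at the previous firstObjPtr (NULL when the
 * cache was empty).
 */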

/*
 *----------------------------------------------------------------------
 *
 * TclThreadFreeObj --
 *
 *      Return a free Tcl_Obj to the per-thread cache.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May move free Tcl_Obj's to shared list upon hitting high water mark.
 *
 *----------------------------------------------------------------------
 */

void
TclThreadFreeObj(
    Tcl_Obj *objPtr)
{
    Cache *cachePtr = TclpGetAllocCache();

    if (cachePtr == NULL) {
        cachePtr = GetCache();
    }

    /*
     * Get this thread's list and push on the free Tcl_Obj.
     */

    objPtr->internalRep.otherValuePtr = cachePtr->firstObjPtr;
    cachePtr->firstObjPtr = objPtr;
    ++cachePtr->numObjects;

    /*
     * If the number of free objects has exceeded the high water mark, move
     * some blocks to the shared list.
     */

    if (cachePtr->numObjects > NOBJHIGH) {
        Tcl_MutexLock(objLockPtr);
        MoveObjs(cachePtr, sharedPtr, NOBJALLOC);
        Tcl_MutexUnlock(objLockPtr);
    }
}

/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetMemoryInfo --
 *
 *      Return a list-of-lists of memory stats.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      List appended to given dstring.
 *
 *----------------------------------------------------------------------
 */

void
Tcl_GetMemoryInfo(
    Tcl_DString *dsPtr)
{
    Cache *cachePtr;
    char buf[200];
    unsigned int n;

    Tcl_MutexLock(listLockPtr);
    cachePtr = firstCachePtr;
    while (cachePtr != NULL) {
        Tcl_DStringStartSublist(dsPtr);
        if (cachePtr == sharedPtr) {
            Tcl_DStringAppendElement(dsPtr, "shared");
        } else {
            sprintf(buf, "thread%p", cachePtr->owner);
            Tcl_DStringAppendElement(dsPtr, buf);
        }
        for (n = 0; n < NBUCKETS; ++n) {
            sprintf(buf, "%lu %ld %ld %ld %ld %ld %ld",
                    (unsigned long) bucketInfo[n].blockSize,
                    cachePtr->buckets[n].numFree,
                    cachePtr->buckets[n].numRemoves,
                    cachePtr->buckets[n].numInserts,
                    cachePtr->buckets[n].totalAssigned,
                    cachePtr->buckets[n].numLocks,
                    cachePtr->buckets[n].numWaits);
            Tcl_DStringAppendElement(dsPtr, buf);
        }
        Tcl_DStringEndSublist(dsPtr);
        cachePtr = cachePtr->nextPtr;
    }
    Tcl_MutexUnlock(listLockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * MoveObjs --
 *
 *      Move Tcl_Obj's between caches.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static void
MoveObjs(
    Cache *fromPtr,
    Cache *toPtr,
    int numMove)
{
    register Tcl_Obj *objPtr = fromPtr->firstObjPtr;
    Tcl_Obj *fromFirstObjPtr = objPtr;

    toPtr->numObjects += numMove;
    fromPtr->numObjects -= numMove;

    /*
     * Find the last object to be moved; set the next one (the first one not
     * to be moved) as the first object in the 'from' cache.
     */

    while (--numMove) {
        objPtr = objPtr->internalRep.otherValuePtr;
    }
    fromPtr->firstObjPtr = objPtr->internalRep.otherValuePtr;

    /*
     * Move all objects as a block - they are already linked to each other, we
     * just have to update the first and last.
     */

    objPtr->internalRep.otherValuePtr = toPtr->firstObjPtr;
    toPtr->firstObjPtr = fromFirstObjPtr;
}
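
/*
 * Illustration of the splice above: if fromPtr's list is A -> B -> C -> D
 * and numMove is 2, the walk stops at B, fromPtr keeps C -> D, and the
 * A -> B segment is pushed intact onto the front of toPtr's list.
 */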

/*
 *----------------------------------------------------------------------
 *
 * Block2Ptr, Ptr2Block --
 *
 *      Convert between internal blocks and user pointers.
 *
 * Results:
 *      User pointer or internal block.
 *
 * Side effects:
 *      Invalid blocks will cause a panic, aborting the process.
 *
 *----------------------------------------------------------------------
 */

static char *
Block2Ptr(
    Block *blockPtr,
    int bucket,
    unsigned int reqSize)
{
    register void *ptr;

    blockPtr->magicNum1 = blockPtr->magicNum2 = MAGIC;
    blockPtr->sourceBucket = bucket;
    blockPtr->blockReqSize = reqSize;
    ptr = ((void *) (blockPtr + 1));
#if RCHECK
    ((unsigned char *)(ptr))[reqSize] = MAGIC;
#endif
    return (char *) ptr;
}

static Block *
Ptr2Block(
    char *ptr)
{
    register Block *blockPtr;

    blockPtr = (((Block *) ptr) - 1);
    if (blockPtr->magicNum1 != MAGIC || blockPtr->magicNum2 != MAGIC) {
        Tcl_Panic("alloc: invalid block: %p: %x %x",
                blockPtr, blockPtr->magicNum1, blockPtr->magicNum2);
    }
#if RCHECK
    if (((unsigned char *) ptr)[blockPtr->blockReqSize] != MAGIC) {
        Tcl_Panic("alloc: invalid block: %p: %x %x %x",
                blockPtr, blockPtr->magicNum1, blockPtr->magicNum2,
                ((unsigned char *) ptr)[blockPtr->blockReqSize]);
    }
#endif
    return blockPtr;
}
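
/*
 * Resulting layout of a single allocation, assuming a 16-byte Block
 * header (64-bit build) with RCHECK enabled:
 *
 *     blockPtr           blockPtr + 1 (user pointer)
 *     |                  |
 *     v                  v
 *     +------------------+------------------------+-------------+
 *     | Block header:    | reqSize user bytes     | MAGIC guard |
 *     | magic1, bucket,  |                        | byte        |
 *     | magic2, reqSize  |                        | (RCHECK)    |
 *     +------------------+------------------------+-------------+
 *
 * Ptr2Block() simply steps the user pointer back by one Block and panics
 * if either magic byte (or, under RCHECK, the trailing guard byte) has
 * been overwritten.
 */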

/*
 *----------------------------------------------------------------------
 *
 * LockBucket, UnlockBucket --
 *
 *      Set/unset the lock to access a bucket in the shared cache.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Lock activity and contention are monitored globally and on a per-cache
 *      basis.
 *
 *----------------------------------------------------------------------
 */

static void
LockBucket(
    Cache *cachePtr,
    int bucket)
{
#if 0
    if (Tcl_MutexTryLock(bucketInfo[bucket].lockPtr) != TCL_OK) {
        Tcl_MutexLock(bucketInfo[bucket].lockPtr);
        ++cachePtr->buckets[bucket].numWaits;
        ++sharedPtr->buckets[bucket].numWaits;
    }
#else
    Tcl_MutexLock(bucketInfo[bucket].lockPtr);
#endif
    ++cachePtr->buckets[bucket].numLocks;
    ++sharedPtr->buckets[bucket].numLocks;
}

static void
UnlockBucket(
    Cache *cachePtr,
    int bucket)
{
    Tcl_MutexUnlock(bucketInfo[bucket].lockPtr);
}

/*
 *----------------------------------------------------------------------
 *
 * PutBlocks --
 *
 *      Return unused blocks to the shared cache.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

static void
PutBlocks(
    Cache *cachePtr,
    int bucket,
    int numMove)
{
    register Block *lastPtr, *firstPtr;
    register int n = numMove;

    /*
     * Before acquiring the lock, walk the block list to find the last block
     * to be moved.
     */

    firstPtr = lastPtr = cachePtr->buckets[bucket].firstPtr;
    while (--n > 0) {
        lastPtr = lastPtr->nextBlock;
    }
    cachePtr->buckets[bucket].firstPtr = lastPtr->nextBlock;
    cachePtr->buckets[bucket].numFree -= numMove;

    /*
     * Acquire the lock and place the list of blocks at the front of the
     * shared cache bucket.
     */

    LockBucket(cachePtr, bucket);
    lastPtr->nextBlock = sharedPtr->buckets[bucket].firstPtr;
    sharedPtr->buckets[bucket].firstPtr = firstPtr;
    sharedPtr->buckets[bucket].numFree += numMove;
    UnlockBucket(cachePtr, bucket);
}

/*
 *----------------------------------------------------------------------
 *
 * GetBlocks --
 *
 *      Get more blocks for a bucket.
 *
 * Results:
 *      1 if blocks were allocated, 0 otherwise.
 *
 * Side effects:
 *      Cache may be filled with available blocks.
 *
 *----------------------------------------------------------------------
 */

static int
GetBlocks(
    Cache *cachePtr,
    int bucket)
{
    register Block *blockPtr;
    register int n;

    /*
     * First, attempt to move blocks from the shared cache. Note the
     * potentially dirty read of numFree before acquiring the lock which is a
     * slight performance enhancement. The value is verified after the lock is
     * actually acquired.
     */

    if (cachePtr != sharedPtr && sharedPtr->buckets[bucket].numFree > 0) {
        LockBucket(cachePtr, bucket);
        if (sharedPtr->buckets[bucket].numFree > 0) {

            /*
             * Either move the entire list or walk the list to find the last
             * block to move.
             */

            n = bucketInfo[bucket].numMove;
            if (n >= sharedPtr->buckets[bucket].numFree) {
                cachePtr->buckets[bucket].firstPtr =
                        sharedPtr->buckets[bucket].firstPtr;
                cachePtr->buckets[bucket].numFree =
                        sharedPtr->buckets[bucket].numFree;
                sharedPtr->buckets[bucket].firstPtr = NULL;
                sharedPtr->buckets[bucket].numFree = 0;
            } else {
                blockPtr = sharedPtr->buckets[bucket].firstPtr;
                cachePtr->buckets[bucket].firstPtr = blockPtr;
                sharedPtr->buckets[bucket].numFree -= n;
                cachePtr->buckets[bucket].numFree = n;
                while (--n > 0) {
                    blockPtr = blockPtr->nextBlock;
                }
                sharedPtr->buckets[bucket].firstPtr = blockPtr->nextBlock;
                blockPtr->nextBlock = NULL;
            }
        }
        UnlockBucket(cachePtr, bucket);
    }

    if (cachePtr->buckets[bucket].numFree == 0) {
        register size_t size;

        /*
         * If no blocks could be moved from shared, first look for a larger
         * block in this cache to split up.
         */

        blockPtr = NULL;
        n = NBUCKETS;
        size = 0; /* lint */
        while (--n > bucket) {
            if (cachePtr->buckets[n].numFree > 0) {
                size = bucketInfo[n].blockSize;
                blockPtr = cachePtr->buckets[n].firstPtr;
                cachePtr->buckets[n].firstPtr = blockPtr->nextBlock;
                --cachePtr->buckets[n].numFree;
                break;
            }
        }

        /*
         * Otherwise, allocate a big new block directly.
         */

        if (blockPtr == NULL) {
            size = MAXALLOC;
            blockPtr = malloc(size);
            if (blockPtr == NULL) {
                return 0;
            }
        }

        /*
         * Split the larger block into smaller blocks for this bucket.
         */

        n = size / bucketInfo[bucket].blockSize;
        cachePtr->buckets[bucket].numFree = n;
        cachePtr->buckets[bucket].firstPtr = blockPtr;
        while (--n > 0) {
            blockPtr->nextBlock = (Block *)
                ((char *) blockPtr + bucketInfo[bucket].blockSize);
            blockPtr = blockPtr->nextBlock;
        }
        blockPtr->nextBlock = NULL;
    }
    return 1;
}
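
/*
 * Worked example of the split above, with the assumed 64-bit constants
 * (MAXALLOC = 16384): a first-time request for bucket 0 (blockSize 32)
 * with an empty shared cache mallocs one 16384-byte chunk and carves it
 * into 16384 / 32 = 512 blocks, chained through their nextBlock fields to
 * form the bucket's free list.
 */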

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *      This procedure is used to destroy all private resources used in this
 *      file.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    unsigned int i;

    for (i = 0; i < NBUCKETS; ++i) {
        TclpFreeAllocMutex(bucketInfo[i].lockPtr);
        bucketInfo[i].lockPtr = NULL;
    }

    TclpFreeAllocMutex(objLockPtr);
    objLockPtr = NULL;

    TclpFreeAllocMutex(listLockPtr);
    listLockPtr = NULL;

    TclpFreeAllocCache(NULL);
}

#else /* !(TCL_THREADS && USE_THREAD_ALLOC) */
/*
 *----------------------------------------------------------------------
 *
 * Tcl_GetMemoryInfo --
 *
 *      Return a list-of-lists of memory stats.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      List appended to given dstring.
 *
 *----------------------------------------------------------------------
 */

void
Tcl_GetMemoryInfo(
    Tcl_DString *dsPtr)
{
    Tcl_Panic("Tcl_GetMemoryInfo called when threaded memory allocator not in use");
}

/*
 *----------------------------------------------------------------------
 *
 * TclFinalizeThreadAlloc --
 *
 *      This procedure is used to destroy all private resources used in this
 *      file.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *----------------------------------------------------------------------
 */

void
TclFinalizeThreadAlloc(void)
{
    Tcl_Panic("TclFinalizeThreadAlloc called when threaded memory allocator not in use");
}
#endif /* TCL_THREADS && USE_THREAD_ALLOC */

/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 4
 * fill-column: 78
 * End:
 */