/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
/*
    Zone.h
    Garbage Collected Heap
    Copyright (c) 2004-2011 Apple Inc. All rights reserved.
 */

#pragma once
#ifndef __AUTO_ZONE_CORE__
#define __AUTO_ZONE_CORE__

#include "auto_zone.h"
#include "auto_impl_utilities.h"
#include "auto_weak.h"

#include "Bitmap.h"
#include "Configuration.h"
#include "Definitions.h"
#include "Large.h"
#include "Locks.h"
#include "Admin.h"
#include "Region.h"
#include "Statistics.h"
#include "Subzone.h"
#include "SubzonePartition.h"
#include "Thread.h"

#include <algorithm>
#include <cassert>

namespace Auto {

    //
    // Forward declarations.
    //
    class Monitor;
    class ResourceTracker;
    class SubzoneBlockRef;
    class LargeBlockRef;

    typedef PointerArray<VMMemoryAllocator> PointerList;

    typedef std::vector<Range, AuxAllocator<Range> > RangeVector;
    class ObjectAssociationMap : public PtrPtrMap, public AuxAllocated {}; // <rdar://problem/7212101> Reduce space usage for each association.
    typedef __gnu_cxx::hash_map<void *, ObjectAssociationMap*, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > AssociationsHashMap;


    //----- Zone -----//

    enum State {
        idle, scanning, enlivening, finalizing, reclaiming
    };

#define worker_print(fmt, args...)
//#define worker_print(fmt, args...) malloc_printf("worker %d: "fmt, Zone::worker_thread_id(), args);


    class Zone {

#define INVALID_THREAD_KEY_VALUE ((Thread *)-1)

      public:

        malloc_zone_t         basic_zone;

        // collection control
        auto_collection_control_t       control;

        // statistics
        spin_lock_t           stats_lock;               // only affects fields below; only a write lock; read access may not be accurate, as we lock statistics independently of the main data structures

        // weak references
        usword_t              num_weak_refs;
        usword_t              max_weak_refs;
        struct weak_entry_t  *weak_refs_table;
        spin_lock_t           weak_refs_table_lock;

        dispatch_once_t       _zone_init_predicate;
        dispatch_queue_t      _collection_queue;
        uint32_t              _collection_count;
        uint32_t              _collector_disable_count;     // counter for external disable-collector API
        uint8_t               _pending_collections[AUTO_ZONE_COLLECT_GLOBAL_MODE_COUNT]; // count of pending collections for each mode
        pthread_mutex_t       _collection_mutex;

        dispatch_source_t     _pressure_source;

        bool                  _compaction_pending;          // true if the compaction timer is armed.
        dispatch_source_t     _compaction_timer;            // resume this timer to trigger in the future.
        dispatch_time_t       _compaction_next_time;        // next allowed time for compaction.

      private:

        //
        // Shared information
        //
        // watch out for static initialization
        static volatile int32_t _zone_count;                // used to generate _zone_id
        static Zone           *_first_zone;                 // for debugging

        //
        // thread management
        //
        Thread                *_registered_threads;         // linked list of registered threads
        pthread_key_t          _registered_threads_key;     // pthread key for looking up Thread instance for this zone
        pthread_mutex_t        _registered_threads_mutex;   // protects _registered_threads and _enlivening_enabled
        bool                   _enlivening_enabled;         // tracks whether new threads should be initialized with enlivening on
        bool                   _enlivening_complete;        // tracks whether or not enlivening has been performed on this collection cycle.

        pthread_mutex_t       _mark_bits_mutex;             // protects the per-Region and Large block mark bits.

        //
        // memory management
        //
        Bitmap                 _in_subzone;                 // indicates which allocations are used for subzone region
        Bitmap                 _in_large;                   // indicates which allocations are used for large blocks
        Large                 *_large_list;                 // doubly linked list of large allocations
        spin_lock_t            _large_lock;                 // protects _large_list, _in_large, and large block refcounts
        PtrHashSet             _roots;                      // hash set of registered roots (globals)
        pthread_mutex_t        _roots_lock;                 // protects _roots
        RangeVector            _datasegments;               // registered data segments.
        spin_lock_t            _datasegments_lock;          // protects _datasegments
        PtrHashSet             _zombies;                    // hash set of zombies
        spin_lock_t            _zombies_lock;               // protects _zombies
        Region                *_region_list;                // singly linked list of subzone regions
        spin_lock_t            _region_lock;                // protects _region_list
        bool                   _repair_write_barrier;       // true if write barrier needs to be repaired after full collection.
        Range                  _coverage;                   // range of managed memory
        spin_lock_t            _coverage_lock;              // protects _coverage
        Statistics             _stats;                      // statistics for this zone
        volatile usword_t      _allocation_counter;         // byte allocation counter (reset after each collection).
        volatile usword_t      _triggered_threshold;        // stores _allocation_counter after reset for post collection statistics
        ResourceTracker        *_resource_tracker_list;     // list of registered external resource trackers
        pthread_mutex_t        _resource_tracker_lock;      // protects _resource_tracker_list (use a separate lock because we call out with it held)
        PointerList            _garbage_list;               // vm_map allocated pages to hold the garbage list.
        size_t                 _large_garbage_count;        // how many blocks in the _garbage_list are large (at the end).
        AssociationsHashMap    _associations;               // associative references object -> ObjectAssociationMap*.
        PtrSizeHashMap         _hashes;                     // associative hash codes.
        pthread_rwlock_t       _associations_lock;          // protects _associations & _hashes
        volatile enum State    _state;                      // the state of the collector
        uint64_t               _average_collection_time;
        volatile int32_t       _collection_checking_enabled;// counter tracking how many times collection checking has been enabled (nonzero = enabled)

#if UseArena
        void                    *_arena;                    // the actual 32G space (region low, larges high)
        void                    *_large_start;              // half-way into arena + size of bitmaps needed for region
        Bitmap                  _large_bits;                // bitmap of top half - tracks quanta used for large blocks
        spin_lock_t             _large_bits_lock;           // protects _large_bits
#endif
        SubzonePartition        _partition;                 // partitioned subzones

        pthread_mutex_t         _worker_lock;
        pthread_cond_t          _worker_cond;
        usword_t                _worker_count;
        usword_t                _sleeping_workers;
        boolean_t               _has_work;
        boolean_t               (*_worker_func)(void *, boolean_t, boolean_t);
        void                    *_worker_arg;

        pthread_mutex_t         _compaction_lock;
        boolean_t               _compaction_disabled;

        //
        // thread safe Large deallocation routines.
        //
        void deallocate_large(Large *large, void *block);
        void deallocate_large_internal(Large *large, void *block);


        //
        // allocate_region
        //
        // Allocate and initialize a new subzone region.
        //
        Region *allocate_region();


        //
        // allocate_large
        //
        // Allocates a large block from the universal pool (directly from vm_memory.)
        //
        void *allocate_large(Thread &thread, usword_t &size, const usword_t layout, bool clear, bool refcount_is_one);


        //
        // find_large
        //
        // Find a large block in this zone.
        //
        inline Large *find_large(void *block) { return Large::large(block); }


        //
        // deallocate_small_medium
        //
        // Release memory allocated for a small block
        //
        void deallocate_small_medium(void *block);


      public:

        //
        // raw memory allocation
        //

#if UseArena

        // set our one region up
        void *arena_allocate_region(usword_t newsize);
#endif

        // on 32-bit w/o arena, goes directly to vm system
        // w/arena, allocate from the top of the arena
        void *arena_allocate_large(usword_t size);

        //
        // raw memory deallocation
        //
        void arena_deallocate(void *, size_t size);

        //
        // admin_offset
        //
        // Return the number of bytes to the beginning of the first admin data item.
        //
        static inline const usword_t admin_offset() { return align(sizeof(Zone), page_size); }


        //
        // bytes_needed
        //
        // Calculate the number of bytes needed for zone data
        //
        static inline const usword_t bytes_needed() {
            usword_t in_subzone_size = Bitmap::bytes_needed(subzone_quantum_max);
            usword_t in_large_size = Bitmap::bytes_needed(allocate_quantum_large_max);
#if UseArena
            usword_t arena_size = Bitmap::bytes_needed(allocate_quantum_large_max);
#else
            usword_t arena_size = 0;
#endif
            return admin_offset() + in_subzone_size + in_large_size + arena_size;
        }
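
        // The backing allocation for a Zone is laid out roughly as computed above (a sketch,
        // not an exact map): the Zone object itself occupies the first admin_offset() bytes
        // (sizeof(Zone) rounded up to a page boundary), and the remaining bytes_needed() space
        // presumably backs the _in_subzone and _in_large bitmaps, plus the arena large-block
        // bitmap when UseArena is set.
        //
        //   +---------------------------+---------------------+--------------------+--------------------------+
        //   | Zone (admin_offset bytes) | _in_subzone bitmap  | _in_large bitmap   | large bitmap (UseArena)  |
        //   +---------------------------+---------------------+--------------------+--------------------------+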


        //
        // allocator
        //
        inline void *operator new(const size_t size) {
#if DEBUG
            // allocate zone data
            void *allocation_address = allocate_guarded_memory(bytes_needed());
#else
            void *allocation_address = allocate_memory(bytes_needed());
#endif

            if (!allocation_address) error("Can not allocate zone");

            return allocation_address;

        }


        //
        // deallocator
        //
        inline void operator delete(void *zone) {
#if DEBUG
            // release zone data
            if (zone) deallocate_guarded_memory(zone, bytes_needed());
#else
            if (zone) deallocate_memory(zone, bytes_needed());
#endif
        }


        //
        // setup_shared
        //
        // Initialize information used by all zones.
        //
        static void setup_shared();

        //
        // allocate_thread_key
        //
        // attempt to allocate a static pthread key for use when creating a new zone
        // returns the new key, or 0 if no keys are available.
        //
        static pthread_key_t allocate_thread_key();

        //
        // Constructors
        //
        Zone(pthread_key_t thread_registration_key);


        //
        // Destructor
        //
        ~Zone();


        //
        // zone
        //
        // Returns the lowest index zone - for debugging purposes only (no locks.)
        //
        static inline Zone *zone() { return _first_zone; }


        //
        // Accessors
        //
        inline Thread         *threads()                    { return _registered_threads; }
        inline pthread_mutex_t *threads_mutex()             { return &_registered_threads_mutex; }
        inline Region         *region_list()                { return _region_list; }
        inline spin_lock_t    *region_lock()                { return &_region_lock; }
        inline Large          *large_list()                 { return _large_list; }
        inline spin_lock_t    *large_lock()                 { return &_large_lock; }
        inline Statistics     &statistics()                 { return _stats; }
        inline Range          &coverage()                   { return _coverage; }
        inline PointerList    &garbage_list()               { return _garbage_list; }
        inline size_t          large_garbage_count()  const { return _large_garbage_count; }
        dispatch_queue_t       collection_queue() const     { return _collection_queue; }
        inline bool            compaction_disabled() const  { return _compaction_disabled; }
        inline bool            compaction_enabled() const   { return !_compaction_disabled; }
        inline pthread_key_t   thread_key() const           { return _registered_threads_key; }

        inline void           add_blocks_and_bytes(int64_t block_count, int64_t byte_count) { _stats.add_count(block_count); _stats.add_size(byte_count); }

        inline Thread *current_thread_direct() {
            if (_pthread_has_direct_tsd()) {
                #define CASE_FOR_DIRECT_KEY(key) case key: return (Thread *)_pthread_getspecific_direct(key)
                switch (_registered_threads_key) {
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY0);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY1);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY2);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY3);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY4);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY5);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY6);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY7);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY8);
                CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY9);
                default: return NULL;
                }
            } else {
                return (Thread *)pthread_getspecific(_registered_threads_key);
            }
        }

        //
        // current_thread
        //
        // If the calling thread is registered with the collector, returns the registered Thread object.
        // If the calling thread is not registered, returns NULL.
        //
        inline Thread *current_thread() {
            Thread *thread = current_thread_direct();
            if (__builtin_expect(thread == INVALID_THREAD_KEY_VALUE, 0)) {
                // If we see this then it means some pthread destructor ran after the
                // zone's destructor and tried to look up a Thread object (tried to perform a GC operation).
                // The collector's destructor needs to run last. We treat this as a fatal error so we will notice immediately.
                // Investigate as a pthreads bug in the ordering of static (Apple internal) pthread keys.
                auto_fatal("Zone::current_thread(): pthread looked up after unregister. Pthreads static key destructor ordering issue?\n");
            }
            return thread;
        }

        //
        // registered_thread
        //
        // Returns the Thread object for the calling thread.
        // If the calling thread is not registered, it is registered implicitly and an error message is logged.
        //
        inline Thread &registered_thread() {
            Thread *thread = current_thread();
            if (!thread) {
                auto_error(this, "GC operation on unregistered thread. Thread registered implicitly. Break on auto_zone_thread_registration_error() to debug.", NULL);
                auto_zone_thread_registration_error();
                return register_thread();
            }
            return *thread;
        }
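
        // Illustrative sketch (hypothetical caller, not part of this header): how client code
        // typically obtains the per-thread object before doing any GC work. `gc_zone` is assumed
        // to be a Zone pointer obtained elsewhere.
        //
        //   void example_touch_thread(Zone *gc_zone) {
        //       Thread &thread = gc_zone->registered_thread();   // registers implicitly if needed
        //       // ... allocation or write-barrier work that requires `thread` ...
        //   }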

        //
        // destroy_registered_thread
        //
        // Pthread key destructor. The collector has a critical dependency on the ordering of pthread destructors.
        // This destructor must run after any other code which might possibly call into the collector.
        // We have arranged with pthreads to have our well known key destructor called after any dynamic keys and
        // any static (Apple internal) keys that might call into the collector (Foundation, CF). On the last iteration
        // (PTHREAD_DESTRUCTOR_ITERATIONS) we unregister the thread.
        // Note that this implementation is non-portable due to our agreement with pthreads.
        //
        static void destroy_registered_thread(void *key_value);

        inline void           set_state(enum State ns)      { _state = ns; }
        inline bool           is_state(enum State ns)       { return _state == ns; }

        inline pthread_mutex_t      *roots_lock()           { return &_roots_lock; }
        inline PtrHashSet           &roots()                { return _roots; }

        inline pthread_rwlock_t     *associations_lock()    { return &_associations_lock; }
        inline AssociationsHashMap  &associations()         { return _associations; }

#if UseArena
        inline void *         arena()                       { return _arena; }
#else
        inline void *         arena()                       { return (void *)0; }
#endif

        inline void           adjust_allocation_counter(usword_t n)  { auto_atomic_add(n, &_allocation_counter); }

        //
        // subzone_index
        //
        // Returns a subzone index for an arbitrary pointer.  Note that this index is computed from the absolute address;
        // Region::subzone_index is relative to the start of the region.
        //
        static inline const usword_t subzone_index(void *address) { return (((usword_t)address & mask(arena_size_log2)) >> subzone_quantum_log2); }


        //
        // subzone_count
        //
        // Returns the number of subzone quanta needed for a given size.
        //
        static inline const usword_t subzone_count(const size_t size) { return partition2(size, subzone_quantum_log2); }
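
        // Illustrative sketch: assuming partition2(size, log2) rounds size up to the next multiple
        // of (1 << log2) and returns the multiple count, a request slightly larger than one subzone
        // quantum needs two subzones:
        //
        //   usword_t n = Zone::subzone_count((1 << subzone_quantum_log2) + 1);   // n == 2 under that assumption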


        //
        // activate_subzone
        //
        // Marks the subzone as being active.
        //
        inline void activate_subzone(Subzone *subzone) { _in_subzone.set_bit_atomic(subzone_index(subzone)); }


        //
        // address_in_arena
        //
        // Given arbitrary address, is it in the arena of GC allocated memory
        //
        inline bool address_in_arena(const void *address) const {
#if UseArena
            //return (((usword_t)address) >> arena_size_log2) == (((usword_t)_arena) >> arena_size_log2);
            return ((usword_t)address & ~mask(arena_size_log2)) == (usword_t)_arena;
#else
            return true;
#endif
        }


        //
        // in_subzone_memory
        //
        // Returns true if address is in auto managed memory.
        //
        inline const bool in_subzone_memory(void *address) const { return address_in_arena(address) && (bool)_in_subzone.bit(subzone_index(address)); }


        //
        // in_subzone_bitmap
        //
        // Returns true if address is in a subzone that is in use, as determined by the subzone bitmap.
        //
        inline const bool in_subzone_bitmap(void *address) const { return (bool)_in_subzone.bit(subzone_index(address)); }


        //
        // in_large_memory
        //
        // Returns true if address is in auto managed memory.  Since side data is smaller than a large quantum we'll not
        // concern ourselves with rounding.
        //
        inline const bool in_large_memory(void *address) const {
#if UseArena
            usword_t arena_q = ((char *)address - (char *)_large_start) >> allocate_quantum_large_log2;
            return address_in_arena(address) && (arena_q < allocate_quantum_large_max) && (bool)_large_bits.bit(arena_q);
#else
            // since vm_allocate() returns addresses in arbitrary locations, can only really tell by calling Large::block_start() in 32-bit mode.
            return address_in_arena(address);
#endif
        }


        //
        // in_large_bitmap
        //
        // Returns true if the large bitmap bit corresponding to address is set.
        //
        inline const bool in_large_bitmap(void *address) const { return (bool)_in_large.bit(Large::quantum_index(address)); }


        //
        // good_block_size
        //
        // Return a block size which makes full use of its allocation quantum (no slop.)
        //
        static inline const usword_t good_block_size(usword_t size) {
            if (size <= allocate_quantum_large)  return align2(size, allocate_quantum_medium_log2);
            return align2(size, allocate_quantum_small_log2);
        }


        //
        // is_block
        //
        // Determines if the specified address is a block in this zone.
        //
        inline bool is_block(void *address) {
            return _coverage.in_range(address) && block_is_start(address);
        }


        //
        // block_allocate
        //
        // Allocate a block of memory from the zone.  layout indicates whether the block is an
        // object or not and whether it is scanned or not.
        //
        void *block_allocate(Thread &thread, const size_t size, const usword_t layout, const bool clear, bool refcount_is_one);
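
        // Illustrative sketch (hypothetical caller, not part of this header): allocating a cleared,
        // scanned object block. AUTO_OBJECT_SCANNED comes from auto_zone.h; the particular layout
        // value is only an example.
        //
        //   void *example_allocate(Zone *gc_zone, size_t size) {
        //       Thread &thread = gc_zone->registered_thread();
        //       return gc_zone->block_allocate(thread, size, AUTO_OBJECT_SCANNED, true /* clear */, false /* refcount_is_one */);
        //   }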

        //
        // batch_allocate
        //
        // Allocate many blocks of memory from the zone.  layout indicates whether the block is an
        // object or not and whether it is scanned or not. All allocated blocks are zeroed.
        // Returns the number of blocks allocated.
        //
        unsigned batch_allocate(Thread &thread, size_t size, const usword_t layout, const bool clear, const bool refcount_is_one, void **results, unsigned num_requested);

        //
        // block_deallocate
        //
        // Release a block of memory from the zone, lazily while scanning.
        //
        void block_deallocate(SubzoneBlockRef block);
        void block_deallocate(LargeBlockRef block);


        //
        // block_is_start_large
        //
        // Return true if arbitrary address is the start of a large block.
        //
        inline bool block_is_start_large(void *address) {
            if (Large::is_start(address)) {
#if UseArena
                // compute q using a signed shift, then convert to unsigned to detect out-of-range addresses using the q < allocate_quantum_large_max test
                usword_t arena_q = ((char *)address - (char *)_large_start) >> allocate_quantum_large_log2;
                return (arena_q < allocate_quantum_large_max) && in_large_bitmap(address);
#else
                return in_large_bitmap(address);
#endif
            }
            return false;
        }


        //
        // block_is_start
        //
        // Return true if the arbitrary address is the start of a block.
        // Broken down because of high frequency of use.
        //
        inline bool block_is_start(void *address) {
            if (in_subzone_memory(address)) {
                usword_t q;
                return Subzone::subzone(address)->block_is_start(address, &q);
            }
            return block_is_start_large(address);
        }


        //
        // block_start_large
        //
        // Return the start of a large block.
        //
        Large *block_start_large(void *address);


        //
        // block_start
        //
        // Return the base block address of an arbitrary address.
        // Broken down because of high frequency of use.
        //
        void *block_start(void *address);


        //
        // block_layout
        //
        // Return the layout of a block.
        //
        usword_t block_layout(void *address);


        //
        // block_set_layout
        //
        // Set the layout of a block.
        //
        void block_set_layout(void *address, const usword_t layout);


      private:
        //
        // close_locks
        //
        // Acquires all locks for critical sections whose behavior changes during scanning.
        // The enlivening_lock is (and must be) already held; all other critical sections must
        // order their locks with the enlivening_lock acquired first.
        //
        inline void close_locks() {
                // acquire all locks for sections that have predicated enlivening work
            // (These locks are in an arbitrary order)

            _partition.lock();

            // Eventually we'll acquire these as well as we reintroduce ConditionBarrier
            //spin_lock(&_retains_lock);          // retain/release
            //spin_lock(&weak_refs_table_lock);   // weak references
            //spin_lock(&_associations_lock);     // associative references
            //spin_lock(&_roots_lock);            // global roots
        }

        inline void open_locks() {
            //spin_unlock(&_roots_lock);
            //spin_unlock(&_associations_lock);
            //spin_unlock(&weak_refs_table_lock);
            //spin_unlock(&_retains_lock);
            _partition.unlock();
         }

      public:

        //
        // is_locked
        //
        // Called by debuggers, with all other threads suspended, to determine if any locks are held that might cause a deadlock from this thread.
        //
        bool is_locked();


        //
        // add_subzone
        //
        // when out of subzones, add another one, allocating region if necessary
        // return false if region can't be allocated
        //
        bool add_subzone(Admin *admin);

        //
        // block_refcount
        //
        // Returns the reference count of the specified block.
        //
        template <class BlockRef> usword_t block_refcount(BlockRef block) { return block.refcount(); }


        //
        // block_increment_refcount
        //
        // Increment the reference count of the specified block.
        //
        template <class BlockRef> usword_t block_increment_refcount(BlockRef block) {
            int refcount;

            Thread &thread = registered_thread();
            refcount = block.inc_refcount();
            if (refcount == 1) {
                ConditionBarrier barrier(thread.needs_enlivening());
                if (barrier) block.enliven();
            }
            return refcount;
        }


        //
        // block_decrement_refcount
        //
        // Decrement the reference count of the specified block.
        //
        template <class BlockRef> usword_t block_decrement_refcount(BlockRef block) { return block.dec_refcount(); }

        //
        // is_local
        //
        // Returns true if the known-to-be-a-block is a thread local node.
        //
        inline bool is_local(void *block) {
            if (in_subzone_memory(block)) {
                Subzone *subzone = Subzone::subzone(block);
                return subzone->is_thread_local(subzone->quantum_index_unchecked(block));
            }
            return false;
        }


        //
        // block_is_garbage
        //
        // Returns true if the specified block is flagged as garbage.  Only valid
        // during finalization.
        //
        inline bool block_is_garbage(void *block) {
            if (in_subzone_memory(block)) {
                Subzone *subzone = Subzone::subzone(block);
                usword_t q;
                return subzone->block_is_start(block, &q) && subzone->is_garbage(q);
            } else if (block_is_start_large(block)) {
                return Large::large(block)->is_garbage();
            }

            return false;
        }

        //
        // set_associative_ref
        //
        // Creates an association between a given block, a unique pointer-sized key, and a pointer value.
        //
        void set_associative_ref(void *block, void *key, void *value);


        //
        // get_associative_ref
        //
        // Returns the associated pointer value for a given block and key.
        //
        void *get_associative_ref(void *block, void *key);


        //
        // get_associative_hash
        //
        // Returns the associated (random) hash value for a given block.
        //
        size_t get_associative_hash(void *block);
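
        // Illustrative sketch (hypothetical caller): associations behave like a per-object
        // key/value table keyed by any unique pointer-sized key.
        //
        //   static char example_key;   // the key is just a unique address
        //   void example_associate(Zone *gc_zone, void *object, void *value) {
        //       gc_zone->set_associative_ref(object, &example_key, value);
        //       void *stored = gc_zone->get_associative_ref(object, &example_key);   // stored == value
        //   }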


        //
        // erase_associations_internal
        //
        // Assuming the associations lock is held, removes all associations for the block.
        //
        void erase_associations_internal(void *block);

        //
        // erase_associations
        //
        // Removes all associations for a given block. Used to
        // clear associations for explicitly deallocated blocks.
        // When the collector frees blocks, it uses a different code
        // path, to minimize locking overhead. See free_garbage().
        //
        void erase_associations(void *block);

        //
        // erase_associations_in_range
        //
        // Called by remove_datasegment() below, when a data segment is unloaded
        // to automatically break associations referenced by global objects (@string constants).
        //
        void erase_associations_in_range(const Range &r);

        //
        // visit_associations_for_key
        //
        // Produces all associations for a given unique key.
        //
        void visit_associations_for_key(void *key, boolean_t (^visitor) (void *object, void *value));

        //
        // sort_free_lists
        //
        // Rebuilds all the admin free lists from subzone side data. Requires that the caller hold the SubzonePartition locked.
        // The newly rebuilt free lists will be sorted.
        //
        void sort_free_lists();

        //
        // add_root
        //
        // Adds the address as a known root.
        // Performs the assignment in a race-safe way.
        // Escapes thread-local value if necessary.
        //
        template <class BlockRef> inline void add_root(void *root, BlockRef value) {
            Thread &thread = registered_thread();
            thread.block_escaped(value);

            UnconditionalBarrier barrier(thread.needs_enlivening());
            Mutex lock(&_roots_lock);
            if (_roots.find(root) == _roots.end()) {
                _roots.insert(root);
            }
            // whether new or old, make sure it gets scanned
            // if new, well, that's obvious, but if old the scanner may already have scanned
            // this root and we'll never see this value otherwise
            if (barrier) value.enliven();
            *(void **)root = value.address();
        }


        //
        // add_root_no_barrier
        //
        // Adds the address as a known root.
        //
        inline void add_root_no_barrier(void *root) {
#if DEBUG
            // this currently fires if somebody uses the wrong version of objc_atomicCompareAndSwap*
            //if (in_subzone_memory(root)) __builtin_trap();
#endif

            Mutex lock(&_roots_lock);
            if (_roots.find(root) == _roots.end()) {
                _roots.insert(root);
            }
        }
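
        // Illustrative sketch (hypothetical caller): registering the storage location of a global
        // strong reference as a root, and removing it later (see remove_root() below).
        //
        //   static void *example_global_ref;                     // scanned as a root once registered
        //   void example_register_root(Zone *gc_zone) {
        //       gc_zone->add_root_no_barrier(&example_global_ref);
        //       // ... later, e.g. when the owning image is unloaded ...
        //       gc_zone->remove_root(&example_global_ref);
        //   }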

        //
        // copy_roots
        //
        // Takes a snapshot of the registered roots during scanning.
        //
        inline void copy_roots(PointerList &list) {
            Mutex lock(&_roots_lock);
            usword_t count = _roots.size();
            list.clear_count();
            list.grow(count);
            list.set_count(count);
            std::copy(_roots.begin(), _roots.end(), (void**)list.buffer());
        }

        //
        // copy_roots
        //
        // Takes a snapshot of the registered roots during scanning.
        //
        inline void copy_roots(PtrVector &list) {
            Mutex lock(&_roots_lock);
            usword_t count = _roots.size();
            list.resize(count);
            std::copy(_roots.begin(), _roots.end(), list.begin());
        }


        // remove_root
        //
        // Removes the address from the known roots.
        //
        inline void remove_root(void *root) {
            Mutex lock(&_roots_lock);
            PtrHashSet::iterator iter = _roots.find(root);
            if (iter != _roots.end()) {
                _roots.erase(iter);
            }
        }


        //
        // is_root
        //
        // Returns whether or not the address has been registered.
        //
        inline bool is_root(void *address) {
            Mutex lock(&_roots_lock);
            PtrHashSet::iterator iter = _roots.find(address);
            return (iter != _roots.end());
        }

        //
        // RangeLess
        //
        // Compares two ranges, returning true IFF r1 is left of r2 on the number line.
        // Returns false if the ranges overlap in any way.
        //
        struct RangeLess {
          bool operator()(const Range &r1, const Range &r2) const {
            return (r1.address() < r2.address()) && (r1.end() <= r2.address()); // overlapping ranges will always return false.
          }
        };
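
        // Illustrative sketch: under this ordering, disjoint ranges sort by address and overlapping
        // ranges compare equivalent (neither is "less"), which is why the std::lower_bound() and
        // std::binary_search() calls below treat a probe range that overlaps a registered segment
        // as a match.
        //
        //   RangeLess less;
        //   less(Range((void *)0x1000, 0x100), Range((void *)0x2000, 0x100));   // true:  disjoint, r1 left of r2
        //   less(Range((void *)0x1000, 0x100), Range((void *)0x1080, 0x100));   // false: the ranges overlap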

        //
        // add_datasegment
        //
        // Adds the given data segment address range to a list of known data segments, which is searched by is_global_address().
        //
        inline void add_datasegment(const Range &r) {
            SpinLock lock(&_datasegments_lock);
            RangeVector::iterator i = std::lower_bound(_datasegments.begin(), _datasegments.end(), r, RangeLess());
            _datasegments.insert(i, r);
        }

        //
        // RangeExcludes
        //
        // Returns true if the address lies outside the given range.
        //
        struct RangeExcludes {
            Range _range;
            RangeExcludes(const Range &r) : _range(r) {}
            bool operator()(void *address) { return !_range.in_range(address); }
        };

        //
        // RootRemover
        //
        // Used by remove_datasegment() below; removes an address from the
        // root table. A small helper functor for use with std::for_each().
        //
        struct RootRemover {
            PtrHashSet &_roots;
            RootRemover(PtrHashSet &roots) : _roots(roots) {}
            void operator()(void *address) {
                PtrHashSet::iterator iter = _roots.find(address);
                if (iter != _roots.end()) _roots.erase(iter);
            }
        };

        //
        // remove_datasegment
        //
        // Removes the given data segment address range from the list of known address ranges.
        //
        inline void remove_datasegment(const Range &r) {
            {
                SpinLock lock(&_datasegments_lock);
                // could use std::lower_bound(), or std::equal_range() to speed this up, since they use binary search to find the range.
                // _datasegments.erase(std::remove(_datasegments.begin(), _datasegments.end(), r, _datasegments.end()));
                RangeVector::iterator i = std::lower_bound(_datasegments.begin(), _datasegments.end(), r, RangeLess());
                if (i != _datasegments.end()) _datasegments.erase(i);
            }
            {
                // When a bundle gets unloaded, scour the roots table to make sure no stale roots are left behind.
                Mutex lock(&_roots_lock);
                PtrVector rootsToRemove;
                std::remove_copy_if(_roots.begin(), _roots.end(), std::back_inserter(rootsToRemove), RangeExcludes(r));
                std::for_each(rootsToRemove.begin(), rootsToRemove.end(), RootRemover(_roots));
            }
            erase_associations_in_range(r);
            weak_unregister_data_segment(this, r.address(), r.size());
        }

        inline void add_datasegment(void *address, size_t size) { add_datasegment(Range(address, size)); }
        inline void remove_datasegment(void *address, size_t size) { remove_datasegment(Range(address, size)); }

        //
        // is_global_address
        //
        // Binary searches the registered data segment address ranges to determine whether the address could be referring to
        // a global variable.
        //
        inline bool is_global_address(void *address) {
            SpinLock lock(&_datasegments_lock);
            return is_global_address_nolock(address);
        }

        inline bool is_global_address_nolock(void *address) {
            return std::binary_search(_datasegments.begin(), _datasegments.end(), Range(address, sizeof(void*)), RangeLess());
        }

#if DEBUG
        //
        // DATASEGMENT REGISTRATION UNIT TEST
        //
        struct RangePrinter {
            void operator() (const Range &r) {
                printf("{ address = %p, end = %p }\n", r.address(), r.end());
            }
        };

        inline void print_datasegments() {
            SpinLock lock(&_datasegments_lock);
            std::for_each(_datasegments.begin(), _datasegments.end(), RangePrinter());
        }

        void test_datasegments() {
            Range r1((void*)0x1000, 512), r2((void*)0xA000, 512);
            add_datasegment(r1);
            add_datasegment(r2);
            print_datasegments();
            Range r3(r1), r4(r2);
            r3.adjust(r1.size()), r4.adjust(-r2.size());
            add_datasegment(r3);
            add_datasegment(r4);
            print_datasegments();
            assert(is_global_address(r1.address()));
            assert(is_global_address(displace(r1.address(), 0x10)));
            assert(is_global_address(displace(r1.end(), -sizeof(void*))));
            assert(is_global_address(displace(r2.address(), 0xA0)));
            assert(is_global_address(displace(r3.address(), 0x30)));
            assert(is_global_address(displace(r4.address(), 0x40)));
            remove_datasegment(r2);
            print_datasegments();
            assert(!is_global_address(displace(r2.address(), 0xA0)));
            remove_datasegment(r1);
            assert(!is_global_address(displace(r1.address(), 0x10)));
            print_datasegments();
            remove_datasegment(r3);
            remove_datasegment(r4);
            print_datasegments();
        }
#endif

        //
        // erase_weak
        //
        // unregisters any weak references contained within known AUTO_OBJECT
        //
        inline void erase_weak(void *ptr) {
            if (control.weak_layout_for_address) {
                const unsigned char* weak_layout = control.weak_layout_for_address((auto_zone_t *)this, ptr);
                if (weak_layout) weak_unregister_with_layout(this, (void**)ptr, weak_layout);
            }
        }

        //
        // add_zombie
        //
        // Adds address to the zombie set.
        //
        inline void add_zombie(void *address) {
            SpinLock lock(&_zombies_lock);
            if (_zombies.find(address) == _zombies.end()) {
                _zombies.insert(address);
            }
        }


        //
        // is_zombie
        //
        // Returns whether or not the address is in the zombie set.
        //
        inline bool is_zombie(void *address) {
            SpinLock lock(&_zombies_lock);
            PtrHashSet::iterator iter = _zombies.find(address);
            return (iter != _zombies.end());
        }

        //
        // clear_zombies
        //
        inline void clear_zombies() {
            SpinLock lock(&_zombies_lock);
            _zombies.clear();
        }

        //
        // zombify_internal
        //
        // Called by free_garbage() on blocks added to the zombie set.
        //
        // Assumes admin/large locks are held by the caller.
        //
        template <class BlockRef> void zombify_internal(BlockRef block) {
            erase_weak(block.address());
            // callback morphs the object into a zombie.
            if (control.resurrect) control.resurrect((auto_zone_t *)this, block.address());
            block.set_layout(AUTO_OBJECT_UNSCANNED);
            block.dec_refcount_no_lock();
        }

        //
        // set_write_barrier
        //
        // Set the write barrier byte corresponding to the specified address.
        // If scanning is going on then the value is marked pending.
        //
        template <class DestBlock, class ValueBlock> void set_write_barrier(Thread &thread, DestBlock dest_block, const void **dest_addr, ValueBlock value_block, const void *value) {
            thread.track_local_assignment(dest_block, value_block);

            UnconditionalBarrier barrier(thread.needs_enlivening());
            if (barrier) value_block.enliven();
            *dest_addr = value;
            // only need to mark the card if value can possibly be collected by generational
            if (value_block.is_thread_local() || value_block.is_new())
                dest_block.mark_card(dest_addr);
        }
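
        // A sketch of the sequence implemented above (not additional API). The ordering matters:
        // the value is enlivened while the enlivening barrier is held so a concurrently scanning
        // collector cannot miss the new reference, and the card is only dirtied when the stored
        // value could otherwise be missed by a generational scan.
        //
        //   thread.track_local_assignment(dest_block, value_block);   // may escape a thread-local value
        //   if (collector is scanning) value_block.enliven();         // keep the reference alive this cycle
        //   *dest_addr = value;                                       // the actual store
        //   if (value_block is thread-local or new) dest_block.mark_card(dest_addr);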


        //
        // set_write_barrier_range
        //
        // Set the write barrier bytes corresponding to the specified address & length.
        // Returns if the address is within an allocated block (and barrier set)
        //
        bool set_write_barrier_range(void *address, const usword_t size);


        //
        // set_write_barrier
        //
        // Set the write barrier byte corresponding to the specified address.
        // Returns if the address is within an allocated block (and barrier set)
        //
        bool set_write_barrier(void *address);


        //
        // mark_write_barriers_untouched
        //
        // iterate through all the write barriers and mark the live cards as provisionally untouched.
        //
        void mark_write_barriers_untouched();


        //
        // clear_untouched_write_barriers
        //
        // iterate through all the write barriers and clear all the cards still marked as untouched.
        //
        void clear_untouched_write_barriers();


        //
        // clear_all_write_barriers
        //
        // iterate through all the write barriers and clear all the marks.
        //
        void clear_all_write_barriers();


        //
        // reset_all_marks
        //
        // Clears the mark flags on all blocks
        //
        void reset_all_marks();


        //
        // reset_all_pinned
        //
        // Clears the pinned bits on all blocks
        //
        void reset_all_pinned();


        inline void set_repair_write_barrier(bool repair) { _repair_write_barrier = repair; }
        inline bool repair_write_barrier() const { return _repair_write_barrier; }


        //
        // set_needs_enlivening
        //
        // Inform all known threads that scanning is about to commence, thus blocks will need to be
        // enlivened to make sure they aren't missed during concurrent scanning.
        //
        void set_needs_enlivening();

        //
        // enlivening_barrier
        //
        // Called by Collector::scan_barrier() to enliven all blocks that
        // would otherwise be missed by concurrent scanning.
        //
        void enlivening_barrier();

        //
        // clear_needs_enlivening
        //
        // Unblocks threads that may be spinning waiting for enlivening to finish.
        //
        void clear_needs_enlivening();


        //
        // collect_begin
        //
        // Indicate the beginning of the collection period.
        //
        void  collect_begin();


        //
        // collect_end
        //
        // Indicate the end of the collection period.
        //
        void  collect_end(CollectionTimer &timer, size_t bytes_collected);

        //
        // purge_free_space
        //
        // Called in response to memory pressure to relinquish pages.
        //
        usword_t purge_free_space();

        //
        // block_collector
        //
        // Called to lock the global mark bits and thread lists.
        // Returns true if successful.
        //
        bool block_collector();

        //
        // unblock_collector
        //
        // Called to unlock the global mark bits and thread lists.
        //
        void unblock_collector();

        //
        // collect
        //
        // Performs the collection process.
        //
        void collect(bool is_partial, void *current_stack_bottom, CollectionTimer &timer);

        //
        // collect_partial
        //
        // Performs a partial (generational) collection.
        //
        void collect_partial(void *current_stack_bottom, CollectionTimer &timer);

        //
        // collect_full
        //
        // Performs a full heap collection.
        //
        void collect_full(void *current_stack_bottom, CollectionTimer &timer);

        //
        // analyze_heap
        //
        // Analyzes the compaction viability of the current heap.
        //
        void analyze_heap(const char *path);

        //
        // compact_heap
        //
        // Compacts entire garbage collected heap.
        //
        void compact_heap();

        //
        // disable_compaction
        //
        // Disables compaction permanently. If called during a compaction, blocks until the compaction finishes.
        //
        void disable_compaction();

        //
        // incremental compaction support.
        //
        void set_in_compaction();
        void compaction_barrier();
        void clear_in_compaction();

        //
        // scavenge_blocks
        //
        // Constructs a list of all blocks that are to be garbaged
        //
        void scavenge_blocks();

        //
        // invalidate_garbage
        //
        // Given an array of garbage, do callouts for finalization
        //
        void invalidate_garbage(const size_t garbage_count, void *garbage[]);

        //
        // handle_overretained_garbage
        //
        // called when we detect that a garbage block has been over-retained during finalization
        // logs a resurrection error (fatal or not, depending on the control settings)
        //
        void handle_overretained_garbage(void *block, int rc, auto_memory_type_t layout);
        template <class BlockRef> inline void handle_overretained_garbage(BlockRef block) {
            handle_overretained_garbage(block.address(), block.refcount(), block.layout());
        }

        //
        // free_garbage
        //
        // Free subzone/large arrays of garbage, en-masse.
        //
        size_t free_garbage(const size_t subzone_garbage_count, void *subzone_garbage[],
                            const size_t large_garbage_count, void *large_garbage[],
                            size_t &blocks_freed, size_t &bytes_freed);

        //
        // release_pages
        //
        // Release any pages that are not in use.
        //
        void release_pages() {
        }

        //
        // recycle_threads
        //
        // Searches for unbound threads, queueing them for deletion.
        //
        void recycle_threads();

        //
        // register_thread
        //
        // Add the current thread as a thread to be scanned during gc.
        //
        Thread &register_thread();


        //
        // unregister_thread
        //
        // deprecated
        //
        void unregister_thread();

    private:
        Thread *firstScannableThread();
        Thread *nextScannableThread(Thread *thread);

    public:
        //
        // scan_registered_threads
        //
        // Safely enumerates the registered threads, ensuring that their stacks
        // remain valid during the call to the scanner block.
        //
#ifdef __BLOCKS__
        typedef void (^thread_visitor_t) (Thread *thread);
#else
        class thread_visitor {
        public:
            virtual void operator() (Thread *thread) = 0;
        };
        typedef thread_visitor &thread_visitor_t;
#endif
        void scan_registered_threads(thread_visitor_t scanner);
        void scan_registered_threads(void (*visitor) (Thread *, void *), void *arg);
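
        // Illustrative sketch (hypothetical caller, __BLOCKS__ flavor): counting the registered
        // threads while their stacks are guaranteed to remain valid for the duration of the visit.
        //
        //   __block size_t n = 0;
        //   gc_zone->scan_registered_threads(^(Thread *thread) { ++n; });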

        //
        // suspend_all_registered_threads
        //
        // Suspend all registered threads. Provided for heap snapshots.
        // Acquires _registered_threads_lock so that no new threads can enter the system.
        //
        void suspend_all_registered_threads();


        //
        // resume_all_registered_threads
        //
        // Resumes all suspended registered threads.  Only used by the monitor for heap snapshots.
        // Relinquishes the _registered_threads_lock.
        //
        void resume_all_registered_threads();


        //
        // perform_work_with_helper_threads
        //
        // Registers func as a work function. Threads will be recruited to call func repeatedly until it returns false.
        // func should perform a chunk of work and then return true if more work remains or false if all work is done.
        // The calling thread becomes a worker and does not return until all work is complete.
        //
        void perform_work_with_helper_threads(boolean_t (*work)(void *arg, boolean_t is_dedicated, boolean_t work_to_completion), void *arg);
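
        // Illustrative sketch (hypothetical work function and queue type): drain work in chunks,
        // returning true while work remains so recruited threads keep calling back.
        //
        //   static boolean_t example_work(void *arg, boolean_t is_dedicated, boolean_t work_to_completion) {
        //       ExampleQueue *queue = (ExampleQueue *)arg;    // hypothetical caller-owned type
        //       return queue->process_one_chunk();            // false once all work is done
        //   }
        //   ...
        //   gc_zone->perform_work_with_helper_threads(&example_work, queue);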


        //
        // volunteer_for_work
        //
        // May be called by threads to volunteer to do work on the collector's behalf.
        // If work is available then a chunk of work is performed and the thread returns.
        // If no work is available then the call is a no-op.
        // Returns true if there is more work to be done, false if not.
        // The intent of this function is that threads can call it while waiting on a spin lock.
        //
        boolean_t volunteer_for_work(boolean_t work_to_completion) {
            if (_has_work > 0) return do_volunteer_for_work(false, work_to_completion);
            return false;
        }


        //
        // do_volunteer_for_work
        //
        // Helper function for volunteer_for_work(). This actually calls the work function.
        // If is_dedicated is true then the function loops until there is no more work.
        // If is_dedicated is false then the work function is called once.
        //
        boolean_t do_volunteer_for_work(boolean_t is_dedicated, boolean_t work_to_completion);


        //
        // worker_thread_loop
        //
        // Helper function for recruit_worker_threads() used to get worker threads from dispatch_apply_f.
        //
        static void worker_thread_loop(void *context, size_t step);


        //
        // weak references.
        //
        unsigned has_weak_references() { return (num_weak_refs != 0); }

        //
        // layout_map_for_block.
        //
        // Used for precise (non-conservative) block scanning.
        //
        const unsigned char *layout_map_for_block(void *block) {
            // FIXME:  for speed, change this to a hard coded offset from the block's word0 field.
            return control.layout_for_address ? control.layout_for_address((auto_zone_t *)this, block) : NULL;
        }

        //
        // weak_layout_map_for_block.
        //
        // Used when scanning conservative blocks that contain weak references.
        //
        const unsigned char *weak_layout_map_for_block(void *block) {
            // FIXME:  for speed, change this to a hard coded offset from the block's word0 field.
            return control.weak_layout_for_address ? control.weak_layout_for_address((auto_zone_t *)this, block) : NULL;
        }

        //
        // name_for_object
        //
        // For blocks with AUTO_OBJECT layout, return a name for the object's type.
        //
        const char *name_for_object(void *object) {
            return control.name_for_object ? control.name_for_object((auto_zone_t *)this, object) : "";
        }

        //
        // forward_block
        //
        // Forwards a block to a new location during compaction.
        //
        void *forward_block(Subzone *subzone, usword_t q, void *block);

        //
        // move_block
        //
        // Moves the block into its new location (using the forwarding pointer).
        //
        void move_block(Subzone *subzone, usword_t q, void *block);

        //
        // print_all_blocks
        //
        // Prints all allocated blocks.
        //
        void print_all_blocks();


        //
        // print block
        //
        // Print the details of a block
        //
        template <class BlockRef> void print_block(BlockRef block, const char *tag);

        //
        // malloc_statistics
        //
        // computes the necessary malloc statistics
        //
        void malloc_statistics(malloc_statistics_t *stats);

        //
        // should_collect
        //
        // Queries all registered resource trackers (including the internal allocation threshold)
        // to determine whether a collection should run.
        //
        boolean_t should_collect();

        //
        // register_resource_tracker
        //
        // Register an external resource tracker. Refer to auto_zone_register_resource_tracker().
        //
        void register_resource_tracker(const char *description, boolean_t (^should_collect)(void));
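
        // Illustrative sketch (hypothetical caller): a tracker whose block asks for a collection
        // once some externally managed resource count crosses a threshold.
        //
        //   gc_zone->register_resource_tracker("example.handles", ^boolean_t (void) {
        //       return example_open_handle_count() > 1000;    // hypothetical counter
        //   });
        //   ...
        //   gc_zone->unregister_resource_tracker("example.handles");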

        //
        // unregister_resource_tracker
        //
        // Unregister an external resource tracker. Refer to auto_zone_register_resource_tracker().
        //
        void unregister_resource_tracker(const char *description);


        //
        // resource_tracker_wants_collection
        //
        // Polls the list of registered resource trackers asking whether a collection should be triggered.
        // Returns true if some registered tracker indicates a collection is desired, false if none do.
        //
        boolean_t resource_tracker_wants_collection();

        //
        // collection_checking_enabled
        //
        // Returns whether collection checking is currently enabled (i.e., the enabled counter is nonzero).
        //
        inline uint32_t const collection_checking_enabled() { return _collection_checking_enabled != 0;}


        //
        // enable_collection_checking/disable_collection_checking
        //
        // Increment/decrement the collection checking enabled counter.
        // Collection checking is enabled when the counter is nonzero.
        //
        void enable_collection_checking();
        void disable_collection_checking();


        //
        // track_pointer
        //
        // Register a block for collection checking. This is a fast no-op if collection checking is disabled.
        //
        void track_pointer(void *pointer);


        //
        // increment_check_counts
        //
        // Increment the collection count for all blocks registered for collection checking.
        //
        void increment_check_counts();


        //
        // clear_garbage_checking_count
        //
        // Unregisters garbage blocks from collection checking.
        //
        void clear_garbage_checking_count(void **garbage, size_t count);

        //
        // enumerate_uncollected
        //
        // Enumerates all allocated blocks and calls callback for those that are being tracked.
        //
        void enumerate_uncollected(auto_zone_collection_checking_callback_t callback);
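
        // Illustrative sketch (hypothetical caller): the collection-checking workflow as described
        // by the declarations above.
        //
        //   gc_zone->enable_collection_checking();
        //   gc_zone->track_pointer(suspect_block);             // a block we expect to be collected soon
        //   // ... collections run; increment_check_counts() presumably advances each block's count ...
        //   gc_zone->enumerate_uncollected(example_callback);  // reports tracked blocks still uncollected
        //   gc_zone->disable_collection_checking();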


#ifdef __BLOCKS__
        //
        // dump_zone
        //
        // call blocks with everything needed to recreate heap
        // blocks are called in the order given
        //
        void dump_zone(
            auto_zone_stack_dump stack_dump,
            auto_zone_register_dump register_dump,
            auto_zone_node_dump thread_local_node_dump,
            auto_zone_root_dump root_dump,
            auto_zone_node_dump global_node_dump,
            auto_zone_weak_dump weak_dump_entry
        );

        //
        // visit_zone
        //
        // Used to enumerate all of the interesting data structures
        // of the Zone. Supersedes dump_zone().
        //
        void visit_zone(auto_zone_visitor_t *visitor);
#endif

    };


};


#endif // __AUTO_ZONE_CORE__