/*
 * runtime.c
 *
 * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
 * to any person obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to permit
 * persons to whom the Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "Block_private.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#include "config.h"

#ifdef HAVE_AVAILABILITY_MACROS_H
#include <AvailabilityMacros.h>
#endif /* HAVE_AVAILABILITY_MACROS_H */

#ifdef HAVE_TARGET_CONDITIONALS_H
#include <TargetConditionals.h>
#endif /* HAVE_TARGET_CONDITIONALS_H */

#if defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_INT) && defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG)

#ifdef HAVE_LIBKERN_OSATOMIC_H
#include <libkern/OSAtomic.h>
#endif /* HAVE_LIBKERN_OSATOMIC_H */

#elif defined(__WIN32__) || defined(_WIN32)
#define _CRT_SECURE_NO_WARNINGS 1
#include <windows.h>

static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    long original = InterlockedCompareExchange(dst, newl, oldl);
    return (original == oldl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
    /* fixme barrier is overkill -- see objc-os.h */
    /* InterlockedCompareExchange operates on LONG; int and long are both 32 bits on Windows */
    int original = InterlockedCompareExchange((long volatile *)dst, newi, oldi);
    return (original == oldi);
}

/*
 * Check to see if the GCC atomic built-ins are available.  If we're on
 * a 64-bit system, make sure we have an 8-byte atomic function
 * available.
 */

#elif defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_INT) && defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_LONG)

static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
  return __sync_bool_compare_and_swap(dst, oldl, newl);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
  return __sync_bool_compare_and_swap(dst, oldi, newi);
}

#else
#error unknown atomic compare-and-swap primitive
#endif /* HAVE_OSATOMIC_COMPARE_AND_SWAP_INT && HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG */
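
/*
 * Illustrative sketch, not compiled (hence the #if 0): on a toolchain that has
 * neither OSAtomic nor the __sync built-ins, an equivalent compare-and-swap
 * pair could plausibly be written with the GCC/Clang __atomic built-ins.  The
 * names mirror the ones used above; the sequentially-consistent memory order
 * is an assumption, not something this file requires.
 */
#if 0
static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
    /* __atomic_compare_exchange_n rewrites 'oldl' on failure; only the bool result matters here */
    return __atomic_compare_exchange_n((long *)dst, &oldl, newl, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
    return __atomic_compare_exchange_n((int *)dst, &oldi, newi, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif /* if 0 */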


/*
 * Globals:
 */

static void *_Block_copy_class = _NSConcreteMallocBlock;
static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
static int _Block_copy_flag = BLOCK_NEEDS_FREE;
static int _Byref_flag_initial_value = BLOCK_NEEDS_FREE | 2;

static const int WANTS_ONE = (1 << 16);

static bool isGC = false;

/*
 * Internal Utilities:
 */

#if 0
static unsigned long int latching_incr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value+1, (volatile long int *)where)) {
            return old_value+1;
        }
    }
}
#endif /* if 0 */

/* Atomically increment the refcount bits of a Block or byref flags word.
   Once the count saturates at BLOCK_REFCOUNT_MASK it "latches" there and is
   never changed again. */
static int latching_incr_int(int *where) {
    while (1) {
        int old_value = *(volatile int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value+1, (volatile int *)where)) {
            return old_value+1;
        }
    }
}

#if 0
static int latching_decr_long(unsigned long int *where) {
    while (1) {
        unsigned long int old_value = *(volatile unsigned long int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0;
        }
        if (OSAtomicCompareAndSwapLong(old_value, old_value-1, (volatile long int *)where)) {
            return old_value-1;
        }
    }
}
#endif /* if 0 */

/* Atomic decrement counterpart of latching_incr_int: a saturated count stays
   latched at BLOCK_REFCOUNT_MASK, and a zero count is never decremented further. */
static int latching_decr_int(int *where) {
    while (1) {
        int old_value = *(volatile int *)where;
        if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
            return BLOCK_REFCOUNT_MASK;
        }
        if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
            return 0;
        }
        if (OSAtomicCompareAndSwapInt(old_value, old_value-1, (volatile int *)where)) {
            return old_value-1;
        }
    }
}


/*
 * GC support stub routines:
 */
#if 0
#pragma mark GC Support Routines
#endif /* if 0 */


static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
    return malloc(size);
}

static void _Block_assign_default(void *value, void **destptr) {
    *destptr = value;
}

static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
}

static void _Block_do_nothing(const void *aBlock) { }

static void _Block_retain_object_default(const void *ptr) {
    if (!ptr) return;
}

static void _Block_release_object_default(const void *ptr) {
    if (!ptr) return;
}

static void _Block_assign_weak_default(const void *ptr, void *dest) {
    *(void **)dest = (void *)ptr;
}

static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
    memmove(dst, src, (size_t)size);
}

static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
    void **destp = (void **)dest;
    void **srcp = (void **)src;
    while (size) {
        _Block_assign_default(*srcp, destp);
        destp++;
        srcp++;
        size -= sizeof(void *);
    }
}

/*
 * GC support callout functions - initially set to stub routines:
 */

static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
static void (*_Block_assign_weak)(const void *ptr, void *dest) = _Block_assign_weak_default;
static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;


/*
 * GC support SPI functions - called from ObjC runtime and CoreFoundation:
 */

/* Public SPI
 * Called from objc-auto to turn on GC.
 * version 3, 4 arg, but changed 1st arg
 */
void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *),
                    void (*gc_memmove)(void *, void *, unsigned long)) {

    isGC = true;
    _Block_allocator = alloc;
    _Block_deallocator = _Block_do_nothing;
    _Block_assign = gc_assign;
    _Block_copy_flag = BLOCK_IS_GC;
    _Block_copy_class = _NSConcreteAutoBlock;
    /* blocks with ctors & dtors need to have the dtor run from a class with a finalizer */
    _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
    _Block_setHasRefcount = setHasRefcount;
    _Byref_flag_initial_value = BLOCK_IS_GC;   // no refcount
    _Block_retain_object = _Block_do_nothing;
    _Block_release_object = _Block_do_nothing;
    _Block_assign_weak = gc_assign_weak;
    _Block_memmove = gc_memmove;
}

/* transitional */
void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
                    void (*setHasRefcount)(const void *, const bool),
                    void (*gc_assign)(void *, void **),
                    void (*gc_assign_weak)(const void *, void *)) {
    /* until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then */
    _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
}


/*
 * Called from objc-auto to alternatively turn on retain/release.
 * Prior to this the only "object" support we can provide is for those
 * super special objects that live in libSystem, namely dispatch queues.
 * Blocks and Block_byrefs have their own special entry points.
 */
void _Block_use_RR( void (*retain)(const void *),
                    void (*release)(const void *)) {
    _Block_retain_object = retain;
    _Block_release_object = release;
}

/*
 * Internal Support routines for copying:
 */

#if 0
#pragma mark Copy/Release support
#endif /* if 0 */

/* Copy, or bump refcount, of a Block.  If really copying, call the copy helper if present. */
static void *_Block_copy_internal(const void *arg, const int flags) {
    struct Block_layout *aBlock;
    const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;

    //printf("_Block_copy_internal(%p, %x)\n", arg, flags);
    if (!arg) return NULL;


    // The following would be better done as a switch statement
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_NEEDS_FREE) {
        // latches on high
        latching_incr_int(&aBlock->flags);
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GC) {
        // GC refcounting is expensive so do most refcounting here.
        if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
            // Tell collector to hang on to this - it will bump the GC refcount version
            _Block_setHasRefcount(aBlock, true);
        }
        return aBlock;
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        return aBlock;
    }

    // It's a stack Block.  Make a copy.
    if (!isGC) {
        struct Block_layout *result = malloc(aBlock->descriptor->size);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        result->flags &= ~(BLOCK_REFCOUNT_MASK);    // XXX not needed
        result->flags |= BLOCK_NEEDS_FREE | 1;
        result->isa = _NSConcreteMallocBlock;
        if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        return result;
    }
    else {
        // Under GC we want the allocation to start with refcount 1, so we ask for that only when wantsOne is set.
        // This allows the copy helper routines to make non-refcounted block copies under GC
        unsigned long int flags = aBlock->flags;
        bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
        struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
        if (!result) return (void *)0;
        memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
        // reset refcount
        // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
        flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK);   // XXX not needed
        if (wantsOne)
            flags |= BLOCK_IS_GC | 1;
        else
            flags |= BLOCK_IS_GC;
        result->flags = flags;
        if (flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling block copy helper...\n");
            (*aBlock->descriptor->copy)(result, aBlock); // do fixup
        }
        if (hasCTOR) {
            result->isa = _NSConcreteFinalizingBlock;
        }
        else {
            result->isa = _NSConcreteAutoBlock;
        }
        return result;
    }
}


/*
 * Runtime entry points for maintaining the sharing knowledge of byref data blocks.
 *
 * A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
 * Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
 * We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment its refcount.
 * Otherwise we need to copy it and update the stack version's forwarding pointer.
 * XXX We need to account for weak/nonretained read-write barriers.
 */
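
/*
 * Illustrative sketch, not compiled (hence the #if 0): roughly what the
 * compiler conceptually emits for a "__block int i;" captured by a Block.
 * The struct below is a simplified stand-in for the real Block_byref layout in
 * Block_private.h.  It shows why _Block_byref_assign_copy patches both
 * forwarding pointers: every access goes through ->forwarding, so it keeps
 * working after the variable moves to the heap.
 */
#if 0
struct _example_byref_i {
    void *isa;
    struct _example_byref_i *forwarding;  /* points at itself until copied to the heap */
    int flags;
    int size;
    int i;                                /* the __block variable itself */
};

static void _example_byref_use(struct _example_byref_i *byref_i) {
    /* stack copy or heap copy alike, reads and writes dereference the forwarding pointer */
    byref_i->forwarding->i += 1;
}
#endif /* if 0 */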

static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
    struct Block_byref **destp = (struct Block_byref **)dest;
    struct Block_byref *src = (struct Block_byref *)arg;

    //printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags);
    //printf("src dump: %s\n", _Block_byref_dump(src));
    if (src->forwarding->flags & BLOCK_IS_GC) {
        ;   // don't need to do any more work
    }
    else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("making copy\n");
        // src points to stack
        bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
        // if it's weak, ask for an object (only matters under GC)
        struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
        copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
        copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
        src->forwarding = copy;  // patch stack to point to heap copy
        copy->size = src->size;
        if (isWeak) {
            copy->isa = &_NSConcreteWeakBlockVariable;  // mark isa field so it gets weak scanning
        }
        if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
            // Trust copy helper to copy everything of interest
            // If more than one field shows up in a byref block this is wrong XXX
            copy->byref_keep = src->byref_keep;
            copy->byref_destroy = src->byref_destroy;
            (*src->byref_keep)(copy, src);
        }
        else {
            // just bits.  Blast 'em using _Block_memmove in case they're __strong
            _Block_memmove(
                (void *)&copy->byref_keep,
                (void *)&src->byref_keep,
                src->size - sizeof(struct Block_byref_header));
        }
    }
    // already copied to heap
    else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) {
        latching_incr_int(&src->forwarding->flags);
    }
    // assign byref data block pointer into new Block
    _Block_assign(src->forwarding, (void **)destp);
}

// Old compiler SPI
static void _Block_byref_release(const void *arg) {
    struct Block_byref *shared_struct = (struct Block_byref *)arg;
    int refcount;

    // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
    shared_struct = shared_struct->forwarding;

    //printf("_Block_byref_release %p called, flags are %x\n", shared_struct, shared_struct->flags);
    // To support C++ destructors under GC we arrange for there to be a finalizer for this
    // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
    if ((shared_struct->flags & BLOCK_NEEDS_FREE) == 0) {
        return; // stack or GC or global
    }
    refcount = shared_struct->flags & BLOCK_REFCOUNT_MASK;
    if (refcount <= 0) {
        printf("_Block_byref_release: Block byref data structure at %p underflowed\n", arg);
    }
    else if ((latching_decr_int(&shared_struct->flags) & BLOCK_REFCOUNT_MASK) == 0) {
        //printf("disposing of heap based byref block\n");
        if (shared_struct->flags & BLOCK_HAS_COPY_DISPOSE) {
            //printf("calling out to helper\n");
            (*shared_struct->byref_destroy)(shared_struct);
        }
        _Block_deallocator((struct Block_layout *)shared_struct);
    }
}


/*
 *
 * API supporting SPI
 * _Block_copy, _Block_release, and (old) _Block_destroy
 *
 */

#if 0
#pragma mark SPI/API
#endif /* if 0 */

void *_Block_copy(const void *arg) {
    return _Block_copy_internal(arg, WANTS_ONE);
}


// API entry point to release a copied Block
void _Block_release(void *arg) {
    struct Block_layout *aBlock = (struct Block_layout *)arg;
    int32_t newCount;
    if (!aBlock) return;
    newCount = latching_decr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK;
    if (newCount > 0) return;
    // Hit zero
    if (aBlock->flags & BLOCK_IS_GC) {
        // Tell GC we no longer have our own refcounts.  GC will decr its refcount
        // and unless someone has done a CFRetain or marked it uncollectable it will
        // now be subject to GC reclamation.
        _Block_setHasRefcount(aBlock, false);
    }
    else if (aBlock->flags & BLOCK_NEEDS_FREE) {
        if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)
            (*aBlock->descriptor->dispose)(aBlock);
        _Block_deallocator(aBlock);
    }
    else if (aBlock->flags & BLOCK_IS_GLOBAL) {
        ;
    }
    else {
        printf("Block_release called upon a stack Block: %p, ignored\n", (void *)aBlock);
    }
}
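
/*
 * Illustrative usage sketch, not compiled (hence the #if 0): how client code
 * that includes <Block.h> typically drives the two entry points above.  The
 * Block literal syntax requires a blocks-capable compiler, and the names used
 * here are purely for illustration.
 */
#if 0
#include <Block.h>   /* client code would include this at the top of its own file */

static void _example_copy_release_usage(void) {
    int captured = 42;
    int (^stackBlock)(void) = ^{ return captured; };

    /* Block_copy() reaches _Block_copy(): the stack Block is moved to the
       heap (or, if already heap-allocated, its refcount is bumped). */
    int (^heapBlock)(void) = Block_copy(stackBlock);
    heapBlock();

    /* Block_release() reaches _Block_release(): when the latched refcount
       drops to zero the dispose helper (if any) runs and the storage is freed. */
    Block_release(heapBlock);
}
#endif /* if 0 */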



// Old compiler SPI entry point, used by compiler-generated dispose helpers to release a copied Block
static void _Block_destroy(const void *arg) {
    struct Block_layout *aBlock;
    if (!arg) return;
    aBlock = (struct Block_layout *)arg;
    if (aBlock->flags & BLOCK_IS_GC) {
        // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
        return; // ignore, we are being called because of a DTOR
    }
    _Block_release(aBlock);
}



/*
 *
 * SPI used by other layers
 *
 */

// SPI, also internal.  Called from NSAutoBlock only under GC
void *_Block_copy_collectable(const void *aBlock) {
    return _Block_copy_internal(aBlock, 0);
}


// SPI
unsigned long int Block_size(void *arg) {
    return ((struct Block_layout *)arg)->descriptor->size;
}


#if 0
#pragma mark Compiler SPI entry points
#endif /* if 0 */


/*******************************************************

Entry points used by the compiler - the real API!


A Block can reference four different kinds of things that require help when the Block is copied to the heap.
1) C++ stack based objects
2) References to Objective-C objects
3) Other Blocks
4) __block variables

In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers.  The copy helper emits a call to the C++ const copy constructor for C++ stack based objects, and for the rest calls into the runtime support function _Block_object_assign.  The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.

The flags parameter of _Block_object_assign and _Block_object_dispose is set to
        * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
        * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
        * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
If the __block variable is marked weak the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).

So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.

When a __block variable is either a C++ object, an Objective-C object, or another Block, the compiler also generates copy/dispose helper functions.  Similarly to the Block copy helper, the "__block" copy helper (formerly, and still informally, known as the "byref" copy helper) invokes the C++ copy constructor (not the const one, though) and the dispose helper invokes the destructor.  And similarly the helpers call into the same two support functions with the same values for objects and Blocks, with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.

So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and 128 always ORed in, for the following set of possibilities:
        __block id                   128+3
        __weak __block id            128+3+16
        __block (^Block)             128+7
        __weak __block (^Block)      128+7+16

The implementation of the two routines would be improved by switch statements enumerating the eight cases.

********************************************************/
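
/*
 * Illustrative sketch, not compiled (hence the #if 0): the approximate shape
 * of the compiler-synthesized copy and dispose helpers described above, for a
 * Block that captures a single Objective-C object pointer.  The literal layout
 * and helper names are simplified stand-ins, not the compiler's actual output.
 */
#if 0
struct _example_Block_literal {
    void *isa;
    int flags;
    int reserved;
    void (*invoke)(struct _example_Block_literal *);
    struct Block_descriptor *descriptor;
    void *capturedObject;       /* imported id, retained/released via the helpers */
};

static void _example_copy_helper(struct _example_Block_literal *dst,
                                 struct _example_Block_literal *src) {
    /* BLOCK_FIELD_IS_OBJECT == 3: retain (or GC-assign) the captured object */
    _Block_object_assign(&dst->capturedObject, src->capturedObject, BLOCK_FIELD_IS_OBJECT);
}

static void _example_dispose_helper(struct _example_Block_literal *src) {
    /* release the captured object when the heap copy is destroyed */
    _Block_object_dispose(src->capturedObject, BLOCK_FIELD_IS_OBJECT);
}
#endif /* if 0 */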

/*
 * When Blocks or Block_byrefs hold objects, their copy helper routines use this entry point
 * to do the assignment.
 */
void _Block_object_assign(void *destAddr, const void *object, const int flags) {
    //printf("_Block_object_assign(*%p, %p, %x)\n", destAddr, object, flags);
    if ((flags & BLOCK_BYREF_CALLER) == BLOCK_BYREF_CALLER) {
        if ((flags & BLOCK_FIELD_IS_WEAK) == BLOCK_FIELD_IS_WEAK) {
            _Block_assign_weak(object, destAddr);
        }
        else {
            // do *not* retain or *copy* __block variables whatever they are
            _Block_assign((void *)object, destAddr);
        }
    }
    else if ((flags & BLOCK_FIELD_IS_BYREF) == BLOCK_FIELD_IS_BYREF)  {
        // copying a __block reference from the stack Block to the heap
        // flags will indicate if it holds a __weak reference and needs a special isa
        _Block_byref_assign_copy(destAddr, object, flags);
    }
    // (this test must be before next one)
    else if ((flags & BLOCK_FIELD_IS_BLOCK) == BLOCK_FIELD_IS_BLOCK) {
        // copying a Block declared variable from the stack Block to the heap
        _Block_assign(_Block_copy_internal(object, flags), destAddr);
    }
    // (this test must be after previous one)
    else if ((flags & BLOCK_FIELD_IS_OBJECT) == BLOCK_FIELD_IS_OBJECT) {
        //printf("retaining object at %p\n", object);
        _Block_retain_object(object);
        //printf("done retaining object at %p\n", object);
        _Block_assign((void *)object, destAddr);
    }
}

// When Blocks or Block_byrefs hold objects, their destroy helper routines call this entry point
// to help dispose of the contents.
// Used initially only for __attribute__((NSObject)) marked pointers.
void _Block_object_dispose(const void *object, const int flags) {
    //printf("_Block_object_dispose(%p, %x)\n", object, flags);
    if (flags & BLOCK_FIELD_IS_BYREF)  {
        // get rid of the __block data structure held in a Block
        _Block_byref_release(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_BLOCK) {
        // get rid of a referenced Block held by this Block
        // (ignore __block Block variables, compiler doesn't need to call us)
        _Block_destroy(object);
    }
    else if ((flags & (BLOCK_FIELD_IS_WEAK|BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_OBJECT) {
        // get rid of a referenced object held by this Block
        // (ignore __block object variables, compiler doesn't need to call us)
        _Block_release_object(object);
    }
}


/*
 * Debugging support:
 */
#if 0
#pragma mark Debugging
#endif /* if 0 */


const char *_Block_dump(const void *block) {
    struct Block_layout *closure = (struct Block_layout *)block;
    static char buffer[512];
    char *cp = buffer;
    if (closure == NULL) {
        sprintf(cp, "NULL passed to _Block_dump\n");
        return buffer;
    }
    if (! (closure->flags & BLOCK_HAS_DESCRIPTOR)) {
        printf("Block compiled by obsolete compiler, please recompile source for this Block\n");
        exit(1);
    }
    cp += sprintf(cp, "^%p (new layout) =\n", (void *)closure);
    if (closure->isa == NULL) {
        cp += sprintf(cp, "isa: NULL\n");
    }
    else if (closure->isa == _NSConcreteStackBlock) {
        cp += sprintf(cp, "isa: stack Block\n");
    }
    else if (closure->isa == _NSConcreteMallocBlock) {
        cp += sprintf(cp, "isa: malloc heap Block\n");
    }
    else if (closure->isa == _NSConcreteAutoBlock) {
        cp += sprintf(cp, "isa: GC heap Block\n");
    }
    else if (closure->isa == _NSConcreteGlobalBlock) {
        cp += sprintf(cp, "isa: global Block\n");
    }
    else if (closure->isa == _NSConcreteFinalizingBlock) {
        cp += sprintf(cp, "isa: finalizing Block\n");
    }
    else {
        cp += sprintf(cp, "isa?: %p\n", (void *)closure->isa);
    }
    cp += sprintf(cp, "flags:");
    if (closure->flags & BLOCK_HAS_DESCRIPTOR) {
        cp += sprintf(cp, " HASDESCRIPTOR");
    }
    if (closure->flags & BLOCK_NEEDS_FREE) {
        cp += sprintf(cp, " FREEME");
    }
    if (closure->flags & BLOCK_IS_GC) {
        cp += sprintf(cp, " ISGC");
    }
    if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, " HASHELP");
    }
    if (closure->flags & BLOCK_HAS_CTOR) {
        cp += sprintf(cp, " HASCTOR");
    }
    cp += sprintf(cp, "\nrefcount: %u\n", closure->flags & BLOCK_REFCOUNT_MASK);
    cp += sprintf(cp, "invoke: %p\n", (void *)(uintptr_t)closure->invoke);
    {
        struct Block_descriptor *dp = closure->descriptor;
        cp += sprintf(cp, "descriptor: %p\n", (void *)dp);
        cp += sprintf(cp, "descriptor->reserved: %lu\n", dp->reserved);
        cp += sprintf(cp, "descriptor->size: %lu\n", dp->size);

        if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
            cp += sprintf(cp, "descriptor->copy helper: %p\n", (void *)(uintptr_t)dp->copy);
            cp += sprintf(cp, "descriptor->dispose helper: %p\n", (void *)(uintptr_t)dp->dispose);
        }
    }
    return buffer;
}


const char *_Block_byref_dump(struct Block_byref *src) {
    static char buffer[256];
    char *cp = buffer;
    cp += sprintf(cp, "byref data block %p contents:\n", (void *)src);
    cp += sprintf(cp, "  forwarding: %p\n", (void *)src->forwarding);
    cp += sprintf(cp, "  flags: 0x%x\n", src->flags);
    cp += sprintf(cp, "  size: %d\n", src->size);
    if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
        cp += sprintf(cp, "  copy helper: %p\n", (void *)(uintptr_t)src->byref_keep);
        cp += sprintf(cp, "  dispose helper: %p\n", (void *)(uintptr_t)src->byref_destroy);
    }
    return buffer;
}