/**********************************************************************

  gc.c -

  $Author: nagachika $
  created at: Tue Oct  5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/
13
14#include "ruby/ruby.h"
15#include "ruby/st.h"
16#include "ruby/re.h"
17#include "ruby/io.h"
18#include "ruby/thread.h"
19#include "ruby/util.h"
20#include "eval_intern.h"
21#include "vm_core.h"
22#include "internal.h"
23#include "gc.h"
24#include "constant.h"
25#include "ruby_atomic.h"
26#include "probes.h"
27#include <stdio.h>
28#include <setjmp.h>
29#include <sys/types.h>
30#include <assert.h>
31
32#ifdef HAVE_SYS_TIME_H
33#include <sys/time.h>
34#endif
35
36#ifdef HAVE_SYS_RESOURCE_H
37#include <sys/resource.h>
38#endif
39#if defined(__native_client__) && defined(NACL_NEWLIB)
40# include "nacl/resource.h"
41# undef HAVE_POSIX_MEMALIGN
42# undef HAVE_MEMALIGN
43
44#endif
45
46#if defined _WIN32 || defined __CYGWIN__
47#include <windows.h>
48#elif defined(HAVE_POSIX_MEMALIGN)
49#elif defined(HAVE_MEMALIGN)
50#include <malloc.h>
51#endif
52
53#ifdef HAVE_VALGRIND_MEMCHECK_H
54# include <valgrind/memcheck.h>
55# ifndef VALGRIND_MAKE_MEM_DEFINED
56#  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
57# endif
58# ifndef VALGRIND_MAKE_MEM_UNDEFINED
59#  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
60# endif
61#else
62# define VALGRIND_MAKE_MEM_DEFINED(p, n) 0
63# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) 0
64#endif
65
66#define rb_setjmp(env) RUBY_SETJMP(env)
67#define rb_jmp_buf rb_jmpbuf_t
68
69#ifndef GC_MALLOC_LIMIT
70#define GC_MALLOC_LIMIT 8000000
71#endif
72#define HEAP_MIN_SLOTS 10000
73#define FREE_MIN  4096
74
75typedef struct {
76    unsigned int initial_malloc_limit;
77    unsigned int initial_heap_min_slots;
78    unsigned int initial_free_min;
79#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
80    int gc_stress;
81#endif
82} ruby_gc_params_t;
83
84static ruby_gc_params_t initial_params = {
85    GC_MALLOC_LIMIT,
86    HEAP_MIN_SLOTS,
87    FREE_MIN,
88#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
89    FALSE,
90#endif
91};
92
93#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
94
95#ifndef GC_PROFILE_MORE_DETAIL
96#define GC_PROFILE_MORE_DETAIL 0
97#endif
98
99typedef struct gc_profile_record {
100    double gc_time;
101    double gc_invoke_time;
102
103    size_t heap_total_objects;
104    size_t heap_use_size;
105    size_t heap_total_size;
106
107    int is_marked;
108
109#if GC_PROFILE_MORE_DETAIL
110    double gc_mark_time;
111    double gc_sweep_time;
112
113    size_t heap_use_slots;
114    size_t heap_live_objects;
115    size_t heap_free_objects;
116
117    int have_finalize;
118
119    size_t allocate_increase;
120    size_t allocate_limit;
121#endif
122} gc_profile_record;
123
124#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
125#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
126#endif
127
128typedef struct RVALUE {
129    union {
130	struct {
131	    VALUE flags;		/* always 0 for freed obj */
132	    struct RVALUE *next;
133	} free;
134	struct RBasic  basic;
135	struct RObject object;
136	struct RClass  klass;
137	struct RFloat  flonum;
138	struct RString string;
139	struct RArray  array;
140	struct RRegexp regexp;
141	struct RHash   hash;
142	struct RData   data;
143	struct RTypedData   typeddata;
144	struct RStruct rstruct;
145	struct RBignum bignum;
146	struct RFile   file;
147	struct RNode   node;
148	struct RMatch  match;
149	struct RRational rational;
150	struct RComplex complex;
151    } as;
152#ifdef GC_DEBUG
153    const char *file;
154    int   line;
155#endif
156} RVALUE;
157
158#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
159#pragma pack(pop)
160#endif
161
162struct heaps_slot {
163    struct heaps_header *header;
164    uintptr_t *bits;
165    RVALUE *freelist;
166    struct heaps_slot *next;
167    struct heaps_slot *prev;
168    struct heaps_slot *free_next;
169};
170
171struct heaps_header {
172    struct heaps_slot *base;
173    uintptr_t *bits;
174    RVALUE *start;
175    RVALUE *end;
176    size_t limit;
177};
178
179struct heaps_free_bitmap {
180    struct heaps_free_bitmap *next;
181};
182
183struct gc_list {
184    VALUE *varptr;
185    struct gc_list *next;
186};
187
188#define STACK_CHUNK_SIZE 500
189
190typedef struct stack_chunk {
191    VALUE data[STACK_CHUNK_SIZE];
192    struct stack_chunk *next;
193} stack_chunk_t;
194
195typedef struct mark_stack {
196    stack_chunk_t *chunk;
197    stack_chunk_t *cache;
198    size_t index;
199    size_t limit;
200    size_t cache_size;
201    size_t unused_cache_size;
202} mark_stack_t;
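
/*
 * The mark stack is a linked list of fixed-size chunks, so it can grow during
 * marking without large reallocations.  A minimal sketch of the push path
 * (illustration only -- the real push_mark_stack()/stack_chunk_alloc() appear
 * later in this file and also maintain the chunk cache):
 *
 *   if (stack->index == stack->limit) {          // current chunk is full
 *       stack_chunk_t *next = stack_chunk_alloc();
 *       next->next = stack->chunk;               // link the new chunk in front
 *       stack->chunk = next;
 *       stack->index = 0;
 *   }
 *   stack->chunk->data[stack->index++] = obj;    // push the VALUE
 */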
203
204#ifndef CALC_EXACT_MALLOC_SIZE
205#define CALC_EXACT_MALLOC_SIZE 0
206#endif
207
208typedef struct rb_objspace {
209    struct {
210	size_t limit;
211	size_t increase;
212#if CALC_EXACT_MALLOC_SIZE
213	size_t allocated_size;
214	size_t allocations;
215#endif
216    } malloc_params;
217    struct {
218	size_t increment;
219	struct heaps_slot *ptr;
220	struct heaps_slot *sweep_slots;
221	struct heaps_slot *free_slots;
222	struct heaps_header **sorted;
223	size_t length;
224	size_t used;
225        struct heaps_free_bitmap *free_bitmap;
226	RVALUE *range[2];
227	struct heaps_header *freed;
228	size_t marked_num;
229	size_t free_num;
230	size_t free_min;
231	size_t final_num;
232	size_t do_heap_free;
233    } heap;
234    struct {
235	int dont_gc;
236	int dont_lazy_sweep;
237	int during_gc;
238	rb_atomic_t finalizing;
239    } flags;
240    struct {
241	st_table *table;
242	RVALUE *deferred;
243    } final;
244    mark_stack_t mark_stack;
245    struct {
246	int run;
247	gc_profile_record *record;
248	size_t count;
249	size_t size;
250	double invoke_time;
251    } profile;
252    struct gc_list *global_list;
253    size_t count;
254    size_t total_allocated_object_num;
255    size_t total_freed_object_num;
256    int gc_stress;
257
258    struct mark_func_data_struct {
259	void *data;
260	void (*mark_func)(VALUE v, void *data);
261    } *mark_func_data;
262} rb_objspace_t;
263
264#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
265#define rb_objspace (*GET_VM()->objspace)
266#define ruby_initial_gc_stress	initial_params.gc_stress
267int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
268#else
269static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}};
270int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
271#endif
272#define malloc_limit		objspace->malloc_params.limit
273#define malloc_increase 	objspace->malloc_params.increase
274#define heaps			objspace->heap.ptr
275#define heaps_length		objspace->heap.length
276#define heaps_used		objspace->heap.used
277#define lomem			objspace->heap.range[0]
278#define himem			objspace->heap.range[1]
279#define heaps_inc		objspace->heap.increment
280#define heaps_freed		objspace->heap.freed
281#define dont_gc 		objspace->flags.dont_gc
282#define during_gc		objspace->flags.during_gc
283#define finalizing		objspace->flags.finalizing
284#define finalizer_table 	objspace->final.table
285#define deferred_final_list	objspace->final.deferred
286#define global_List		objspace->global_list
287#define ruby_gc_stress		objspace->gc_stress
288#define initial_malloc_limit	initial_params.initial_malloc_limit
289#define initial_heap_min_slots	initial_params.initial_heap_min_slots
290#define initial_free_min	initial_params.initial_free_min
291
292#define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)
293
294#if SIZEOF_LONG == SIZEOF_VOIDP
295# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
296# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
297#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
298# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
299# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
300   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
301#else
302# error not supported
303#endif
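
/*
 * On the SIZEOF_LONG == SIZEOF_VOIDP branch above, a non-special object's id
 * is simply its address with FIXNUM_FLAG OR-ed in (heap objects are
 * sizeof(RVALUE)-aligned, so the low bit is always free), and obj_id_to_ref()
 * undoes it.  Illustrative round trip:
 *
 *   VALUE obj = ...;                        // some heap object
 *   VALUE oid = nonspecial_obj_id(obj);     // obj | FIXNUM_FLAG, a Fixnum-tagged id
 *   VALUE ref = obj_id_to_ref(oid);         // oid ^ FIXNUM_FLAG == obj again
 */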
304
305#define RANY(o) ((RVALUE*)(o))
306#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)
307
308#define HEAP_HEADER(p) ((struct heaps_header *)(p))
309#define GET_HEAP_HEADER(x) (HEAP_HEADER((uintptr_t)(x) & ~(HEAP_ALIGN_MASK)))
310#define GET_HEAP_SLOT(x) (GET_HEAP_HEADER(x)->base)
311#define GET_HEAP_BITMAP(x) (GET_HEAP_HEADER(x)->bits)
312#define NUM_IN_SLOT(p) (((uintptr_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
313#define BITMAP_INDEX(p) (NUM_IN_SLOT(p) / (sizeof(uintptr_t) * CHAR_BIT))
314#define BITMAP_OFFSET(p) (NUM_IN_SLOT(p) & ((sizeof(uintptr_t) * CHAR_BIT)-1))
315#define MARKED_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] & ((uintptr_t)1 << BITMAP_OFFSET(p)))
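
/*
 * Mark bits live in a per-slot bitmap instead of in the object header.
 * Illustrative use of the macros above (the marking code later in this file
 * sets the bit in essentially this way); given an object pointer p:
 *
 *   uintptr_t *bits = GET_HEAP_BITMAP(p);                       // bitmap of p's slot
 *   bits[BITMAP_INDEX(p)] |= (uintptr_t)1 << BITMAP_OFFSET(p);  // mark p
 *   if (MARKED_IN_BITMAP(bits, p)) { ... }                      // p is marked
 */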
316
317#ifndef HEAP_ALIGN_LOG
318/* default tiny heap size: 16KB */
319#define HEAP_ALIGN_LOG 14
320#endif
321
322#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
323
324enum {
325    HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG),
326    HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)),
327    REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
328    HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC),
329    HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE)),
330    HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), sizeof(uintptr_t) * CHAR_BIT)
331};
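
/*
 * Heap slots are obtained from aligned_malloc(HEAP_ALIGN, HEAP_SIZE), so every
 * object in a slot shares the slot's high address bits.  That is what lets
 * GET_HEAP_HEADER() recover the slot header from any object pointer by masking,
 * with no lookup table.  Rough illustration:
 *
 *   RVALUE *p = ...;                                    // an object in some slot
 *   struct heaps_header *h =
 *       (struct heaps_header *)((uintptr_t)p & ~HEAP_ALIGN_MASK);
 *   assert(h->start <= p && p < h->end);                // p lies inside this slot
 */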
332
333int ruby_gc_debug_indent = 0;
334VALUE rb_mGC;
335extern st_table *rb_class_tbl;
336int ruby_disable_gc_stress = 0;
337
338static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
339static VALUE define_final0(VALUE obj, VALUE block);
340VALUE rb_define_final(VALUE obj, VALUE block);
341VALUE rb_undefine_final(VALUE obj);
342static void run_final(rb_objspace_t *objspace, VALUE obj);
343static void initial_expand_heap(rb_objspace_t *objspace);
344
345static void negative_size_allocation_error(const char *);
346static void *aligned_malloc(size_t, size_t);
347static void aligned_free(void *);
348
349static void init_mark_stack(mark_stack_t *stack);
350
351static VALUE lazy_sweep_enable(void);
352static int garbage_collect(rb_objspace_t *);
353static int gc_prepare_free_objects(rb_objspace_t *);
354static void mark_tbl(rb_objspace_t *, st_table *);
355static void rest_sweep(rb_objspace_t *);
356static void gc_mark_stacked_objects(rb_objspace_t *);
357
358static double getrusage_time(void);
359static inline void gc_prof_timer_start(rb_objspace_t *);
360static inline void gc_prof_timer_stop(rb_objspace_t *, int);
361static inline void gc_prof_mark_timer_start(rb_objspace_t *);
362static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
363static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
364static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
365static inline void gc_prof_set_malloc_info(rb_objspace_t *);
366
367
368/*
369  --------------------------- ObjectSpace -----------------------------
370*/
371
372#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
373rb_objspace_t *
374rb_objspace_alloc(void)
375{
376    rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
377    memset(objspace, 0, sizeof(*objspace));
378    malloc_limit = initial_malloc_limit;
379    ruby_gc_stress = ruby_initial_gc_stress;
380
381    return objspace;
382}
383#endif
384
385#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
386static void free_stack_chunks(mark_stack_t *);
387
388void
389rb_objspace_free(rb_objspace_t *objspace)
390{
391    rest_sweep(objspace);
392    if (objspace->profile.record) {
393	free(objspace->profile.record);
394	objspace->profile.record = 0;
395    }
396    if (global_List) {
397	struct gc_list *list, *next;
398	for (list = global_List; list; list = next) {
399	    next = list->next;
400	    xfree(list);
401	}
402    }
403    if (objspace->heap.free_bitmap) {
404        struct heaps_free_bitmap *list, *next;
405        for (list = objspace->heap.free_bitmap; list; list = next) {
406            next = list->next;
407            free(list);
408        }
409    }
410    if (objspace->heap.sorted) {
411	size_t i;
412	for (i = 0; i < heaps_used; ++i) {
413            free(objspace->heap.sorted[i]->bits);
414	    aligned_free(objspace->heap.sorted[i]);
415	}
416	free(objspace->heap.sorted);
417	heaps_used = 0;
418	heaps = 0;
419    }
420    free_stack_chunks(&objspace->mark_stack);
421    free(objspace);
422}
423#endif
424
425void
426rb_global_variable(VALUE *var)
427{
428    rb_gc_register_address(var);
429}
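
/*
 * Typical use from a C extension: register the address of a static VALUE so
 * the GC treats it as a root and never collects the object it refers to.
 * (Hypothetical names, for illustration only.)
 *
 *   static VALUE cached_template;
 *
 *   void
 *   Init_my_ext(void)
 *   {
 *       cached_template = rb_str_new_cstr("...");
 *       rb_global_variable(&cached_template);
 *   }
 */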
430
431static void
432allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
433{
434    struct heaps_header **p;
435    struct heaps_free_bitmap *bits;
436    size_t size, add, i;
437
438    size = next_heaps_length*sizeof(struct heaps_header *);
439    add = next_heaps_length - heaps_used;
440
441    if (heaps_used > 0) {
442	p = (struct heaps_header **)realloc(objspace->heap.sorted, size);
443	if (p) objspace->heap.sorted = p;
444    }
445    else {
446	p = objspace->heap.sorted = (struct heaps_header **)malloc(size);
447    }
448
449    if (p == 0) {
450	during_gc = 0;
451	rb_memerror();
452    }
453
454    for (i = 0; i < add; i++) {
455        bits = (struct heaps_free_bitmap *)malloc(HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
456        if (bits == 0) {
457            during_gc = 0;
458            rb_memerror();
459            return;
460        }
461        bits->next = objspace->heap.free_bitmap;
462        objspace->heap.free_bitmap = bits;
463    }
464}
465
466static void
467link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
468{
469    slot->free_next = objspace->heap.free_slots;
470    objspace->heap.free_slots = slot;
471}
472
473static void
474unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
475{
476    objspace->heap.free_slots = slot->free_next;
477    slot->free_next = NULL;
478}
479
480static void
481assign_heap_slot(rb_objspace_t *objspace)
482{
483    RVALUE *p, *pend, *membase;
484    struct heaps_slot *slot;
485    size_t hi, lo, mid;
486    size_t objs;
487
488    objs = HEAP_OBJ_LIMIT;
489    p = (RVALUE*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
490    if (p == 0) {
491	during_gc = 0;
492	rb_memerror();
493    }
494    slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
495    if (slot == 0) {
496       aligned_free(p);
497       during_gc = 0;
498       rb_memerror();
499    }
500    MEMZERO((void*)slot, struct heaps_slot, 1);
501
502    slot->next = heaps;
503    if (heaps) heaps->prev = slot;
504    heaps = slot;
505
506    membase = p;
507    p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header));
508    if ((VALUE)p % sizeof(RVALUE) != 0) {
509       p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
510       objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE);
511    }
512
513    lo = 0;
514    hi = heaps_used;
515    while (lo < hi) {
516	register RVALUE *mid_membase;
517	mid = (lo + hi) / 2;
518	mid_membase = (RVALUE *)objspace->heap.sorted[mid];
519	if (mid_membase < membase) {
520	    lo = mid + 1;
521	}
522	else if (mid_membase > membase) {
523	    hi = mid;
524	}
525	else {
526	    rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
527	}
528    }
529    if (hi < heaps_used) {
530	MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct heaps_header*, heaps_used - hi);
531    }
532    heaps->header = (struct heaps_header *)membase;
533    objspace->heap.sorted[hi] = heaps->header;
534    objspace->heap.sorted[hi]->start = p;
535    objspace->heap.sorted[hi]->end = (p + objs);
536    objspace->heap.sorted[hi]->base = heaps;
537    objspace->heap.sorted[hi]->limit = objs;
538    assert(objspace->heap.free_bitmap != NULL);
539    heaps->bits = (uintptr_t *)objspace->heap.free_bitmap;
540    objspace->heap.sorted[hi]->bits = (uintptr_t *)objspace->heap.free_bitmap;
541    objspace->heap.free_bitmap = objspace->heap.free_bitmap->next;
542    memset(heaps->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
543    pend = p + objs;
544    if (lomem == 0 || lomem > p) lomem = p;
545    if (himem < pend) himem = pend;
546    heaps_used++;
547
548    while (p < pend) {
549	p->as.free.flags = 0;
550	p->as.free.next = heaps->freelist;
551	heaps->freelist = p;
552	p++;
553    }
554    link_free_heap_slot(objspace, heaps);
555}
556
557static void
558add_heap_slots(rb_objspace_t *objspace, size_t add)
559{
560    size_t i;
561    size_t next_heaps_length;
562
563    next_heaps_length = heaps_used + add;
564
565    if (next_heaps_length > heaps_length) {
566        allocate_sorted_heaps(objspace, next_heaps_length);
567        heaps_length = next_heaps_length;
568    }
569
570    for (i = 0; i < add; i++) {
571        assign_heap_slot(objspace);
572    }
573    heaps_inc = 0;
574}
575
576static void
577init_heap(rb_objspace_t *objspace)
578{
579    add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
580    init_mark_stack(&objspace->mark_stack);
581
582#ifdef USE_SIGALTSTACK
583    {
	/* altstacks of other threads are allocated elsewhere */
585	rb_thread_t *th = GET_THREAD();
586	void *tmp = th->altstack;
587	th->altstack = malloc(rb_sigaltstack_size());
588	free(tmp); /* free previously allocated area */
589    }
590#endif
591
592    objspace->profile.invoke_time = getrusage_time();
593    finalizer_table = st_init_numtable();
594}
595
596static void
597initial_expand_heap(rb_objspace_t *objspace)
598{
599    size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;
600
601    if (min_size > heaps_used) {
602        add_heap_slots(objspace, min_size - heaps_used);
603    }
604}
605
606static void
607set_heaps_increment(rb_objspace_t *objspace)
608{
609    size_t next_heaps_length = (size_t)(heaps_used * 1.8);
610
611    if (next_heaps_length == heaps_used) {
612        next_heaps_length++;
613    }
614
615    heaps_inc = next_heaps_length - heaps_used;
616
617    if (next_heaps_length > heaps_length) {
618	allocate_sorted_heaps(objspace, next_heaps_length);
619        heaps_length = next_heaps_length;
620    }
621}
622
623static int
624heaps_increment(rb_objspace_t *objspace)
625{
626    if (heaps_inc > 0) {
627        assign_heap_slot(objspace);
628	heaps_inc--;
629	return TRUE;
630    }
631    return FALSE;
632}
633
634static VALUE
635newobj(VALUE klass, VALUE flags)
636{
637    rb_objspace_t *objspace = &rb_objspace;
638    VALUE obj;
639
640    if (UNLIKELY(during_gc)) {
641	dont_gc = 1;
642	during_gc = 0;
643	rb_bug("object allocation during garbage collection phase");
644    }
645
646    if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
647	if (!garbage_collect(objspace)) {
648	    during_gc = 0;
649	    rb_memerror();
650	}
651    }
652
653    if (UNLIKELY(!has_free_object)) {
654	if (!gc_prepare_free_objects(objspace)) {
655	    during_gc = 0;
656	    rb_memerror();
657	}
658    }
659
660    obj = (VALUE)objspace->heap.free_slots->freelist;
661    objspace->heap.free_slots->freelist = RANY(obj)->as.free.next;
662    if (objspace->heap.free_slots->freelist == NULL) {
663        unlink_free_heap_slot(objspace, objspace->heap.free_slots);
664    }
665
666    MEMZERO((void*)obj, RVALUE, 1);
667#ifdef GC_DEBUG
668    RANY(obj)->file = rb_sourcefile();
669    RANY(obj)->line = rb_sourceline();
670#endif
671    objspace->total_allocated_object_num++;
672
673    return obj;
674}
675
676VALUE
677rb_newobj(void)
678{
679    return newobj(0, T_NONE);
680}
681
682VALUE
683rb_newobj_of(VALUE klass, VALUE flags)
684{
685    VALUE obj;
686
687    obj = newobj(klass, flags);
688    OBJSETUP(obj, klass, flags);
689
690    return obj;
691}
692
693NODE*
694rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
695{
696    NODE *n = (NODE*)rb_newobj();
697
698    n->flags |= T_NODE;
699    nd_set_type(n, type);
700
701    n->u1.value = a0;
702    n->u2.value = a1;
703    n->u3.value = a2;
704
705    return n;
706}
707
708VALUE
709rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
710{
711    NEWOBJ(data, struct RData);
712    if (klass) Check_Type(klass, T_CLASS);
713    OBJSETUP(data, klass, T_DATA);
714    data->data = datap;
715    data->dfree = dfree;
716    data->dmark = dmark;
717
718    return (VALUE)data;
719}
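
/*
 * rb_data_object_alloc() is what Data_Wrap_Struct() expands to.  A minimal
 * usage sketch, assuming a hypothetical `struct point`, class `cPoint`, and
 * free function (none of which are defined in this file):
 *
 *   static void point_free(void *p) { xfree(p); }
 *
 *   struct point *pt = ALLOC(struct point);
 *   VALUE wrapped = rb_data_object_alloc(cPoint, pt, NULL, point_free);
 *   // equivalent to: Data_Wrap_Struct(cPoint, NULL, point_free, pt)
 */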
720
721VALUE
722rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
723{
724    NEWOBJ(data, struct RTypedData);
725
726    if (klass) Check_Type(klass, T_CLASS);
727
728    OBJSETUP(data, klass, T_DATA);
729
730    data->data = datap;
731    data->typed_flag = 1;
732    data->type = type;
733
734    return (VALUE)data;
735}
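
/*
 * The typed variant attaches an rb_data_type_t descriptor instead of bare mark
 * and free pointers; TypedData_Wrap_Struct() is the usual entry point.  A
 * hedged sketch reusing the hypothetical point type from above (field order:
 * wrap_struct_name, then the dmark/dfree/dsize function table):
 *
 *   static const rb_data_type_t point_data_type = {
 *       "point",
 *       {NULL, point_free, point_memsize,},
 *   };
 *
 *   VALUE wrapped = rb_data_typed_object_alloc(cPoint, pt, &point_data_type);
 */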
736
737size_t
738rb_objspace_data_type_memsize(VALUE obj)
739{
740    if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
741	return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
742    }
743    else {
744	return 0;
745    }
746}
747
748const char *
749rb_objspace_data_type_name(VALUE obj)
750{
751    if (RTYPEDDATA_P(obj)) {
752	return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
753    }
754    else {
755	return 0;
756    }
757}
758
759static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
760static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
761
762static inline int
763is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
764{
765    register RVALUE *p = RANY(ptr);
766    register struct heaps_header *heap;
767    register size_t hi, lo, mid;
768
769    if (p < lomem || p > himem) return FALSE;
770    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
771
    /* check if p points into one of the heap slots (binary search) */
773    lo = 0;
774    hi = heaps_used;
775    while (lo < hi) {
776	mid = (lo + hi) / 2;
777	heap = objspace->heap.sorted[mid];
778	if (heap->start <= p) {
779	    if (p < heap->end)
780		return TRUE;
781	    lo = mid + 1;
782	}
783	else {
784	    hi = mid;
785	}
786    }
787    return FALSE;
788}
789
790static int
791free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
792{
793    if (!me->mark) {
794	rb_free_method_entry(me);
795    }
796    return ST_CONTINUE;
797}
798
799void
800rb_free_m_table(st_table *tbl)
801{
802    st_foreach(tbl, free_method_entry_i, 0);
803    st_free_table(tbl);
804}
805
806static int
807free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
808{
809    xfree(ce);
810    return ST_CONTINUE;
811}
812
813void
814rb_free_const_table(st_table *tbl)
815{
816    st_foreach(tbl, free_const_entry_i, 0);
817    st_free_table(tbl);
818}
819
820static int obj_free(rb_objspace_t *, VALUE);
821
822static inline struct heaps_slot *
823add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p)
824{
825    struct heaps_slot *slot;
826
827    (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
828    p->as.free.flags = 0;
829    slot = GET_HEAP_SLOT(p);
830    p->as.free.next = slot->freelist;
831    slot->freelist = p;
832
833    return slot;
834}
835
836static void
837unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
838{
839    if (slot->prev)
840        slot->prev->next = slot->next;
841    if (slot->next)
842        slot->next->prev = slot->prev;
843    if (heaps == slot)
844        heaps = slot->next;
845    if (objspace->heap.sweep_slots == slot)
846        objspace->heap.sweep_slots = slot->next;
847    slot->prev = NULL;
848    slot->next = NULL;
849}
850
851static void
852free_unused_heaps(rb_objspace_t *objspace)
853{
854    size_t i, j;
855    struct heaps_header *last = 0;
856
857    for (i = j = 1; j < heaps_used; i++) {
858	if (objspace->heap.sorted[i]->limit == 0) {
859            struct heaps_header* h = objspace->heap.sorted[i];
860            ((struct heaps_free_bitmap *)(h->bits))->next =
861                objspace->heap.free_bitmap;
862            objspace->heap.free_bitmap = (struct heaps_free_bitmap *)h->bits;
863	    if (!last) {
864                last = objspace->heap.sorted[i];
865	    }
866	    else {
867		aligned_free(objspace->heap.sorted[i]);
868	    }
869	    heaps_used--;
870	}
871	else {
872	    if (i != j) {
873		objspace->heap.sorted[j] = objspace->heap.sorted[i];
874	    }
875	    j++;
876	}
877    }
878    if (last) {
879	if (last < heaps_freed) {
880	    aligned_free(heaps_freed);
881	    heaps_freed = last;
882	}
883	else {
884	    aligned_free(last);
885	}
886    }
887}
888static inline void
889make_deferred(RVALUE *p)
890{
891    p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
892}
893
894static inline void
895make_io_deferred(RVALUE *p)
896{
897    rb_io_t *fptr = p->as.file.fptr;
898    make_deferred(p);
899    p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
900    p->as.data.data = fptr;
901}
902
903static int
904obj_free(rb_objspace_t *objspace, VALUE obj)
905{
906    switch (BUILTIN_TYPE(obj)) {
907      case T_NIL:
908      case T_FIXNUM:
909      case T_TRUE:
910      case T_FALSE:
911	rb_bug("obj_free() called for broken object");
912	break;
913    }
914
915    if (FL_TEST(obj, FL_EXIVAR)) {
916	rb_free_generic_ivar((VALUE)obj);
917	FL_UNSET(obj, FL_EXIVAR);
918    }
919
920    switch (BUILTIN_TYPE(obj)) {
921      case T_OBJECT:
922	if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
923            RANY(obj)->as.object.as.heap.ivptr) {
924	    xfree(RANY(obj)->as.object.as.heap.ivptr);
925	}
926	break;
927      case T_MODULE:
928      case T_CLASS:
929	rb_clear_cache_by_class((VALUE)obj);
930        if (RCLASS_M_TBL(obj)) {
931            rb_free_m_table(RCLASS_M_TBL(obj));
932        }
933	if (RCLASS_IV_TBL(obj)) {
934	    st_free_table(RCLASS_IV_TBL(obj));
935	}
936	if (RCLASS_CONST_TBL(obj)) {
937	    rb_free_const_table(RCLASS_CONST_TBL(obj));
938	}
939	if (RCLASS_IV_INDEX_TBL(obj)) {
940	    st_free_table(RCLASS_IV_INDEX_TBL(obj));
941	}
942        xfree(RANY(obj)->as.klass.ptr);
943	break;
944      case T_STRING:
945	rb_str_free(obj);
946	break;
947      case T_ARRAY:
948	rb_ary_free(obj);
949	break;
950      case T_HASH:
951	if (RANY(obj)->as.hash.ntbl) {
952	    st_free_table(RANY(obj)->as.hash.ntbl);
953	}
954	break;
955      case T_REGEXP:
956	if (RANY(obj)->as.regexp.ptr) {
957	    onig_free(RANY(obj)->as.regexp.ptr);
958	}
959	break;
960      case T_DATA:
961	if (DATA_PTR(obj)) {
962	    if (RTYPEDDATA_P(obj)) {
963		RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
964	    }
965	    if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
966		xfree(DATA_PTR(obj));
967	    }
968	    else if (RANY(obj)->as.data.dfree) {
969		make_deferred(RANY(obj));
970		return 1;
971	    }
972	}
973	break;
974      case T_MATCH:
975	if (RANY(obj)->as.match.rmatch) {
976            struct rmatch *rm = RANY(obj)->as.match.rmatch;
977	    onig_region_free(&rm->regs, 0);
978            if (rm->char_offset)
979		xfree(rm->char_offset);
980	    xfree(rm);
981	}
982	break;
983      case T_FILE:
984	if (RANY(obj)->as.file.fptr) {
985	    make_io_deferred(RANY(obj));
986	    return 1;
987	}
988	break;
989      case T_RATIONAL:
990      case T_COMPLEX:
991	break;
992      case T_ICLASS:
993	/* iClass shares table with the module */
994	xfree(RANY(obj)->as.klass.ptr);
995	break;
996
997      case T_FLOAT:
998	break;
999
1000      case T_BIGNUM:
1001	if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
1002	    xfree(RBIGNUM_DIGITS(obj));
1003	}
1004	break;
1005      case T_NODE:
1006	switch (nd_type(obj)) {
1007	  case NODE_SCOPE:
1008	    if (RANY(obj)->as.node.u1.tbl) {
1009		xfree(RANY(obj)->as.node.u1.tbl);
1010	    }
1011	    break;
1012	  case NODE_ARGS:
1013	    if (RANY(obj)->as.node.u3.args) {
1014		xfree(RANY(obj)->as.node.u3.args);
1015	    }
1016	    break;
1017	  case NODE_ALLOCA:
1018	    xfree(RANY(obj)->as.node.u1.node);
1019	    break;
1020	}
1021	break;			/* no need to free iv_tbl */
1022
1023      case T_STRUCT:
1024	if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
1025	    RANY(obj)->as.rstruct.as.heap.ptr) {
1026	    xfree(RANY(obj)->as.rstruct.as.heap.ptr);
1027	}
1028	break;
1029
1030      default:
1031	rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1032	       BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1033    }
1034
1035    return 0;
1036}
1037
1038void
1039Init_heap(void)
1040{
1041    init_heap(&rb_objspace);
1042}
1043
1044typedef int each_obj_callback(void *, void *, size_t, void *);
1045
1046struct each_obj_args {
1047    each_obj_callback *callback;
1048    void *data;
1049};
1050
1051static VALUE
1052objspace_each_objects(VALUE arg)
1053{
1054    size_t i;
1055    RVALUE *membase = 0;
1056    RVALUE *pstart, *pend;
1057    rb_objspace_t *objspace = &rb_objspace;
1058    struct each_obj_args *args = (struct each_obj_args *)arg;
1059    volatile VALUE v;
1060
1061    i = 0;
1062    while (i < heaps_used) {
1063	while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1])
1064	    i--;
1065	while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i] <= (uintptr_t)membase)
1066	    i++;
1067	if (heaps_used <= i)
1068	  break;
1069	membase = (RVALUE *)objspace->heap.sorted[i];
1070
1071	pstart = objspace->heap.sorted[i]->start;
1072	pend = pstart + objspace->heap.sorted[i]->limit;
1073
1074	for (; pstart != pend; pstart++) {
1075	    if (pstart->as.basic.flags) {
1076		v = (VALUE)pstart; /* acquire to save this object */
1077		break;
1078	    }
1079	}
1080	if (pstart != pend) {
1081	    if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
1082		break;
1083	    }
1084	}
1085    }
1086    RB_GC_GUARD(v);
1087
1088    return Qnil;
1089}
1090
/*
 * rb_objspace_each_objects() is a special C API for walking through the
 * Ruby object space.  It is difficult to use correctly; avoid it unless
 * you have read the source of this function and understand what it does.
 *
 * 'callback' will be called several times (once per heap slot, in the
 * current implementation) with:
 *   vstart: a pointer to the first living object of the heap slot.
 *   vend:   a pointer just past the end of the valid heap slot area.
 *   stride: the distance to the next VALUE.
 *
 * If callback() returns non-zero, the iteration is stopped.
 *
 * This is a sample callback that iterates over live objects:
 *
 *   int
 *   sample_callback(void *vstart, void *vend, size_t stride, void *data) {
 *     VALUE v = (VALUE)vstart;
 *     for (; v != (VALUE)vend; v += stride) {
 *       if (RBASIC(v)->flags) { // liveness check
 *         // do something with live object 'v'
 *       }
 *     }
 *     return 0; // continue the iteration
 *   }
 *
 * Note: 'vstart' is not the top of the heap slot.  It points at the first
 *       living object, so that at least one object is kept alive and GC
 *       problems are avoided.  This means you cannot walk through every
 *       object slot, including freed slots.
 *
 * Note: In this implementation, 'stride' equals sizeof(RVALUE).  However,
 *       a different value may be passed for some reason in the future, so
 *       always use the given stride rather than a hard-coded constant in
 *       the iteration.
 */
1127void
1128rb_objspace_each_objects(each_obj_callback *callback, void *data)
1129{
1130    struct each_obj_args args;
1131    rb_objspace_t *objspace = &rb_objspace;
1132
1133    rest_sweep(objspace);
1134    objspace->flags.dont_lazy_sweep = TRUE;
1135
1136    args.callback = callback;
1137    args.data = data;
1138    rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
1139}
1140
1141struct os_each_struct {
1142    size_t num;
1143    VALUE of;
1144};
1145
1146static int
1147internal_object_p(VALUE obj)
1148{
1149    RVALUE *p = (RVALUE *)obj;
1150
1151    if (p->as.basic.flags) {
1152	switch (BUILTIN_TYPE(p)) {
1153	  case T_NONE:
1154	  case T_ICLASS:
1155	  case T_NODE:
1156	  case T_ZOMBIE:
1157	    break;
1158	  case T_CLASS:
1159	    if (FL_TEST(p, FL_SINGLETON))
1160	      break;
1161	  default:
1162	    if (!p->as.basic.klass) break;
1163	    return 0;
1164	}
1165    }
1166    return 1;
1167}
1168
1169int
1170rb_objspace_internal_object_p(VALUE obj)
1171{
1172    return internal_object_p(obj);
1173}
1174
1175static int
1176os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1177{
1178    struct os_each_struct *oes = (struct os_each_struct *)data;
1179    RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
1180
1181    for (; p != pend; p++) {
1182	volatile VALUE v = (VALUE)p;
1183	if (!internal_object_p(v)) {
1184	    if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1185		rb_yield(v);
1186		oes->num++;
1187	    }
1188	}
1189    }
1190
1191    return 0;
1192}
1193
1194static VALUE
1195os_obj_of(VALUE of)
1196{
1197    struct os_each_struct oes;
1198
1199    oes.num = 0;
1200    oes.of = of;
1201    rb_objspace_each_objects(os_obj_of_i, &oes);
1202    return SIZET2NUM(oes.num);
1203}
1204
1205/*
1206 *  call-seq:
1207 *     ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
1208 *     ObjectSpace.each_object([module])              -> an_enumerator
1209 *
1210 *  Calls the block once for each living, nonimmediate object in this
1211 *  Ruby process. If <i>module</i> is specified, calls the block
1212 *  for only those classes or modules that match (or are a subclass of)
1213 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
1216 *  never returned. In the example below, <code>each_object</code>
1217 *  returns both the numbers we defined and several constants defined in
1218 *  the <code>Math</code> module.
1219 *
1220 *  If no block is given, an enumerator is returned instead.
1221 *
1222 *     a = 102.7
1223 *     b = 95       # Won't be returned
1224 *     c = 12345678987654321
1225 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
1226 *     puts "Total count: #{count}"
1227 *
1228 *  <em>produces:</em>
1229 *
1230 *     12345678987654321
1231 *     102.7
1232 *     2.71828182845905
1233 *     3.14159265358979
1234 *     2.22044604925031e-16
1235 *     1.7976931348623157e+308
1236 *     2.2250738585072e-308
1237 *     Total count: 7
1238 *
1239 */
1240
1241static VALUE
1242os_each_obj(int argc, VALUE *argv, VALUE os)
1243{
1244    VALUE of;
1245
1246    rb_secure(4);
1247    if (argc == 0) {
1248	of = 0;
1249    }
1250    else {
1251	rb_scan_args(argc, argv, "01", &of);
1252    }
1253    RETURN_ENUMERATOR(os, 1, &of);
1254    return os_obj_of(of);
1255}
1256
1257/*
1258 *  call-seq:
1259 *     ObjectSpace.undefine_finalizer(obj)
1260 *
1261 *  Removes all finalizers for <i>obj</i>.
1262 *
1263 */
1264
1265static VALUE
1266undefine_final(VALUE os, VALUE obj)
1267{
1268    return rb_undefine_final(obj);
1269}
1270
1271VALUE
1272rb_undefine_final(VALUE obj)
1273{
1274    rb_objspace_t *objspace = &rb_objspace;
1275    st_data_t data = obj;
1276    rb_check_frozen(obj);
1277    st_delete(finalizer_table, &data, 0);
1278    FL_UNSET(obj, FL_FINALIZE);
1279    return obj;
1280}
1281
1282/*
1283 *  call-seq:
1284 *     ObjectSpace.define_finalizer(obj, aProc=proc())
1285 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  is destroyed.
1288 *
1289 */
1290
1291static VALUE
1292define_final(int argc, VALUE *argv, VALUE os)
1293{
1294    VALUE obj, block;
1295
1296    rb_scan_args(argc, argv, "11", &obj, &block);
1297    rb_check_frozen(obj);
1298    if (argc == 1) {
1299	block = rb_block_proc();
1300    }
1301    else if (!rb_respond_to(block, rb_intern("call"))) {
1302	rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
1303		 rb_obj_classname(block));
1304    }
1305
1306    return define_final0(obj, block);
1307}
1308
1309static VALUE
1310define_final0(VALUE obj, VALUE block)
1311{
1312    rb_objspace_t *objspace = &rb_objspace;
1313    VALUE table;
1314    st_data_t data;
1315
1316    if (!FL_ABLE(obj)) {
1317	rb_raise(rb_eArgError, "cannot define finalizer for %s",
1318		 rb_obj_classname(obj));
1319    }
1320    RBASIC(obj)->flags |= FL_FINALIZE;
1321
1322    block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
1323    OBJ_FREEZE(block);
1324
1325    if (st_lookup(finalizer_table, obj, &data)) {
1326	table = (VALUE)data;
1327	rb_ary_push(table, block);
1328    }
1329    else {
1330	table = rb_ary_new3(1, block);
1331	RBASIC(table)->klass = 0;
1332	st_add_direct(finalizer_table, obj, table);
1333    }
1334    return block;
1335}
1336
1337VALUE
1338rb_define_final(VALUE obj, VALUE block)
1339{
1340    rb_check_frozen(obj);
1341    if (!rb_respond_to(block, rb_intern("call"))) {
1342	rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
1343		 rb_obj_classname(block));
1344    }
1345    return define_final0(obj, block);
1346}
1347
1348void
1349rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1350{
1351    rb_objspace_t *objspace = &rb_objspace;
1352    VALUE table;
1353    st_data_t data;
1354
1355    if (!FL_TEST(obj, FL_FINALIZE)) return;
1356    if (st_lookup(finalizer_table, obj, &data)) {
1357	table = (VALUE)data;
1358	st_insert(finalizer_table, dest, table);
1359    }
1360    FL_SET(dest, FL_FINALIZE);
1361}
1362
1363static VALUE
1364run_single_final(VALUE arg)
1365{
1366    VALUE *args = (VALUE *)arg;
1367    rb_eval_cmd(args[0], args[1], (int)args[2]);
1368    return Qnil;
1369}
1370
1371static void
1372run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
1373{
1374    long i;
1375    int status;
1376    VALUE args[3];
1377    VALUE objid = nonspecial_obj_id(obj);
1378    VALUE saved_errinfo = rb_errinfo();
1379
1380    if (RARRAY_LEN(table) > 0) {
1381	args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
1382    }
1383    else {
1384	args[1] = 0;
1385    }
1386
1387    args[2] = (VALUE)rb_safe_level();
1388    rb_set_errinfo(Qnil);
1389    for (i=0; i<RARRAY_LEN(table); i++) {
1390	VALUE final = RARRAY_PTR(table)[i];
1391	args[0] = RARRAY_PTR(final)[1];
1392	args[2] = FIX2INT(RARRAY_PTR(final)[0]);
1393	status = 0;
1394	rb_protect(run_single_final, (VALUE)args, &status);
1395	if (status)
1396	    rb_set_errinfo(Qnil);
1397    }
1398    GET_THREAD()->errinfo = saved_errinfo;
1399}
1400
1401static void
1402run_final(rb_objspace_t *objspace, VALUE obj)
1403{
1404    RUBY_DATA_FUNC free_func = 0;
1405    st_data_t key, table;
1406
1407    objspace->heap.final_num--;
1408
1409    RBASIC(obj)->klass = 0;
1410
1411    if (RTYPEDDATA_P(obj)) {
1412	free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
1413    }
1414    else {
1415	free_func = RDATA(obj)->dfree;
1416    }
1417    if (free_func) {
1418	(*free_func)(DATA_PTR(obj));
1419    }
1420
1421    key = (st_data_t)obj;
1422    if (st_delete(finalizer_table, &key, &table)) {
1423	run_finalizer(objspace, obj, (VALUE)table);
1424    }
1425}
1426
1427static void
1428finalize_list(rb_objspace_t *objspace, RVALUE *p)
1429{
1430    while (p) {
1431	RVALUE *tmp = p->as.free.next;
1432	run_final(objspace, (VALUE)p);
1433	objspace->total_freed_object_num++;
1434	if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
1435            add_slot_local_freelist(objspace, p);
1436	    objspace->heap.free_num++;
1437	}
1438	else {
1439	    struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
1440	    slot->header->limit--;
1441	}
1442	p = tmp;
1443    }
1444}
1445
1446static void
1447finalize_deferred(rb_objspace_t *objspace)
1448{
1449    RVALUE *p;
1450
1451    while ((p = ATOMIC_PTR_EXCHANGE(deferred_final_list, 0)) != 0) {
1452	finalize_list(objspace, p);
1453    }
1454}
1455
1456void
1457rb_gc_finalize_deferred(void)
1458{
1459    rb_objspace_t *objspace = &rb_objspace;
1460    if (ATOMIC_EXCHANGE(finalizing, 1)) return;
1461    finalize_deferred(objspace);
1462    ATOMIC_SET(finalizing, 0);
1463}
1464
1465struct force_finalize_list {
1466    VALUE obj;
1467    VALUE table;
1468    struct force_finalize_list *next;
1469};
1470
1471static int
1472force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
1473{
1474    struct force_finalize_list **prev = (struct force_finalize_list **)arg;
1475    struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
1476    curr->obj = key;
1477    curr->table = val;
1478    curr->next = *prev;
1479    *prev = curr;
1480    return ST_CONTINUE;
1481}
1482
1483void
1484rb_gc_call_finalizer_at_exit(void)
1485{
1486    rb_objspace_call_finalizer(&rb_objspace);
1487}
1488
1489static void
1490rb_objspace_call_finalizer(rb_objspace_t *objspace)
1491{
1492    RVALUE *p, *pend;
1493    RVALUE *final_list = 0;
1494    size_t i;
1495
1496    rest_sweep(objspace);
1497
1498    if (ATOMIC_EXCHANGE(finalizing, 1)) return;
1499
1500    /* run finalizers */
1501    finalize_deferred(objspace);
1502    assert(deferred_final_list == 0);
1503
    /* force remaining finalizers to run */
1505    while (finalizer_table->num_entries) {
1506	struct force_finalize_list *list = 0;
1507	st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
1508	while (list) {
1509	    struct force_finalize_list *curr = list;
1510	    st_data_t obj = (st_data_t)curr->obj;
1511	    run_finalizer(objspace, curr->obj, curr->table);
1512	    st_delete(finalizer_table, &obj, 0);
1513	    list = curr->next;
1514	    xfree(curr);
1515	}
1516    }
1517
1518    /* finalizers are part of garbage collection */
1519    during_gc++;
1520
1521    /* run data object's finalizers */
1522    for (i = 0; i < heaps_used; i++) {
1523	p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;
1524	while (p < pend) {
1525	    if (BUILTIN_TYPE(p) == T_DATA &&
1526		DATA_PTR(p) && RANY(p)->as.data.dfree &&
1527		!rb_obj_is_thread((VALUE)p) && !rb_obj_is_mutex((VALUE)p) &&
1528		!rb_obj_is_fiber((VALUE)p)) {
1529		p->as.free.flags = 0;
1530		if (RTYPEDDATA_P(p)) {
1531		    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
1532		}
1533		if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
1534		    xfree(DATA_PTR(p));
1535		}
1536		else if (RANY(p)->as.data.dfree) {
1537		    make_deferred(RANY(p));
1538		    RANY(p)->as.free.next = final_list;
1539		    final_list = p;
1540		}
1541	    }
1542	    else if (BUILTIN_TYPE(p) == T_FILE) {
1543		if (RANY(p)->as.file.fptr) {
1544		    make_io_deferred(RANY(p));
1545		    RANY(p)->as.free.next = final_list;
1546		    final_list = p;
1547		}
1548	    }
1549	    p++;
1550	}
1551    }
1552    during_gc = 0;
1553    if (final_list) {
1554	finalize_list(objspace, final_list);
1555    }
1556
1557    st_free_table(finalizer_table);
1558    finalizer_table = 0;
1559    ATOMIC_SET(finalizing, 0);
1560}
1561
1562static inline int
1563is_id_value(rb_objspace_t *objspace, VALUE ptr)
1564{
1565    if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
1566    if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
1567    if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
1568    return TRUE;
1569}
1570
1571static inline int
1572is_swept_object(rb_objspace_t *objspace, VALUE ptr)
1573{
1574    struct heaps_slot *slot = objspace->heap.sweep_slots;
1575
1576    while (slot) {
1577	if ((VALUE)slot->header->start <= ptr && ptr < (VALUE)(slot->header->end))
1578	    return FALSE;
1579	slot = slot->next;
1580    }
1581    return TRUE;
1582}
1583
1584static inline int
1585is_dead_object(rb_objspace_t *objspace, VALUE ptr)
1586{
1587    if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_BITMAP(ptr), ptr))
1588	return FALSE;
1589    if (!is_swept_object(objspace, ptr))
1590	return TRUE;
1591    return FALSE;
1592}
1593
1594static inline int
1595is_live_object(rb_objspace_t *objspace, VALUE ptr)
1596{
1597    if (BUILTIN_TYPE(ptr) == 0) return FALSE;
1598    if (RBASIC(ptr)->klass == 0) return FALSE;
1599    if (is_dead_object(objspace, ptr)) return FALSE;
1600    return TRUE;
1601}
1602
1603/*
1604 *  call-seq:
1605 *     ObjectSpace._id2ref(object_id) -> an_object
1606 *
1607 *  Converts an object id to a reference to the object. May not be
1608 *  called on an object id passed as a parameter to a finalizer.
1609 *
1610 *     s = "I am a string"                    #=> "I am a string"
1611 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
1612 *     r == s                                 #=> true
1613 *
1614 */
1615
1616static VALUE
1617id2ref(VALUE obj, VALUE objid)
1618{
1619#if SIZEOF_LONG == SIZEOF_VOIDP
1620#define NUM2PTR(x) NUM2ULONG(x)
1621#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1622#define NUM2PTR(x) NUM2ULL(x)
1623#endif
1624    rb_objspace_t *objspace = &rb_objspace;
1625    VALUE ptr;
1626    void *p0;
1627
1628    rb_secure(4);
1629    ptr = NUM2PTR(objid);
1630    p0 = (void *)ptr;
1631
1632    if (ptr == Qtrue) return Qtrue;
1633    if (ptr == Qfalse) return Qfalse;
1634    if (ptr == Qnil) return Qnil;
1635    if (FIXNUM_P(ptr)) return (VALUE)ptr;
1636    if (FLONUM_P(ptr)) return (VALUE)ptr;
1637    ptr = obj_id_to_ref(objid);
1638
1639    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
1640        ID symid = ptr / sizeof(RVALUE);
1641        if (rb_id2name(symid) == 0)
1642	    rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
1643	return ID2SYM(symid);
1644    }
1645
1646    if (!is_id_value(objspace, ptr)) {
1647	rb_raise(rb_eRangeError, "%p is not id value", p0);
1648    }
1649    if (!is_live_object(objspace, ptr)) {
1650	rb_raise(rb_eRangeError, "%p is recycled object", p0);
1651    }
1652    return (VALUE)ptr;
1653}
1654
1655/*
1656 *  Document-method: __id__
1657 *  Document-method: object_id
1658 *
1659 *  call-seq:
1660 *     obj.__id__       -> integer
1661 *     obj.object_id    -> integer
1662 *
1663 *  Returns an integer identifier for +obj+.
1664 *
1665 *  The same number will be returned on all calls to +id+ for a given object,
1666 *  and no two active objects will share an id.
1667 *
1668 *  Object#object_id is a different concept from the +:name+ notation, which
1669 *  returns the symbol id of +name+.
1670 *
1671 *  Replaces the deprecated Object#id.
1672 */
1673
1674/*
1675 *  call-seq:
1676 *     obj.hash    -> fixnum
1677 *
1678 *  Generates a Fixnum hash value for this object.
1679 *
1680 *  This function must have the property that <code>a.eql?(b)</code> implies
1681 *  <code>a.hash == b.hash</code>.
1682 *
 *  The hash value is used by the Hash class.
1684 *
1685 *  Any hash value that exceeds the capacity of a Fixnum will be truncated
1686 *  before being used.
1687 */
1688
1689VALUE
1690rb_obj_id(VALUE obj)
1691{
1692    /*
1693     *                32-bit VALUE space
1694     *          MSB ------------------------ LSB
1695     *  false   00000000000000000000000000000000
1696     *  true    00000000000000000000000000000010
1697     *  nil     00000000000000000000000000000100
1698     *  undef   00000000000000000000000000000110
1699     *  symbol  ssssssssssssssssssssssss00001110
1700     *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
1701     *  fixnum  fffffffffffffffffffffffffffffff1
1702     *
1703     *                    object_id space
1704     *                                       LSB
1705     *  false   00000000000000000000000000000000
1706     *  true    00000000000000000000000000000010
1707     *  nil     00000000000000000000000000000100
1708     *  undef   00000000000000000000000000000110
1709     *  symbol   000SSSSSSSSSSSSSSSSSSSSSSSSSSS0        S...S % A = 4 (S...S = s...s * A + 4)
1710     *  object   oooooooooooooooooooooooooooooo0        o...o % A = 0
1711     *  fixnum  fffffffffffffffffffffffffffffff1        bignum if required
1712     *
1713     *  where A = sizeof(RVALUE)/4
1714     *
1715     *  sizeof(RVALUE) is
1716     *  20 if 32-bit, double is 4-byte aligned
1717     *  24 if 32-bit, double is 8-byte aligned
1718     *  40 if 64-bit
1719     */
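    /*
     * Worked example (64-bit, sizeof(RVALUE) == 40): for a Symbol whose
     * internal ID is 3, this returns the VALUE (3 * 40 + (4 << 2)) | FIXNUM_FLAG
     * == 137, i.e. the Fixnum 68.  id2ref() reverses it by XOR-ing FIXNUM_FLAG
     * off the VALUE (136), checking 136 % 40 == 16, and dividing: 136 / 40 == 3.
     */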
1720    if (SYMBOL_P(obj)) {
1721        return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
1722    }
1723    else if (FLONUM_P(obj)) {
1724#if SIZEOF_LONG == SIZEOF_VOIDP
1725	return LONG2NUM((SIGNED_VALUE)obj);
1726#else
1727	return LL2NUM((SIGNED_VALUE)obj);
1728#endif
1729    }
1730    else if (SPECIAL_CONST_P(obj)) {
1731	return LONG2NUM((SIGNED_VALUE)obj);
1732    }
1733    return nonspecial_obj_id(obj);
1734}
1735
1736static int
1737set_zero(st_data_t key, st_data_t val, st_data_t arg)
1738{
1739    VALUE k = (VALUE)key;
1740    VALUE hash = (VALUE)arg;
1741    rb_hash_aset(hash, k, INT2FIX(0));
1742    return ST_CONTINUE;
1743}
1744
1745/*
1746 *  call-seq:
1747 *     ObjectSpace.count_objects([result_hash]) -> hash
1748 *
1749 *  Counts objects for each type.
1750 *
1751 *  It returns a hash, such as:
1752 *	{
1753 *	  :TOTAL=>10000,
1754 *	  :FREE=>3011,
1755 *	  :T_OBJECT=>6,
1756 *	  :T_CLASS=>404,
1757 *	  # ...
1758 *	}
1759 *
 *  The contents of the returned hash are implementation specific
 *  and may change in the future.
 *
 *  If the optional argument +result_hash+ is given,
 *  it is overwritten and returned. This is intended to avoid the probe effect.
1765 *
1766 *  This method is only expected to work on C Ruby.
1767 *
1768 */
1769
1770static VALUE
1771count_objects(int argc, VALUE *argv, VALUE os)
1772{
1773    rb_objspace_t *objspace = &rb_objspace;
1774    size_t counts[T_MASK+1];
1775    size_t freed = 0;
1776    size_t total = 0;
1777    size_t i;
1778    VALUE hash;
1779
1780    if (rb_scan_args(argc, argv, "01", &hash) == 1) {
1781        if (!RB_TYPE_P(hash, T_HASH))
1782            rb_raise(rb_eTypeError, "non-hash given");
1783    }
1784
1785    for (i = 0; i <= T_MASK; i++) {
1786        counts[i] = 0;
1787    }
1788
1789    for (i = 0; i < heaps_used; i++) {
1790        RVALUE *p, *pend;
1791
1792        p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;
1793        for (;p < pend; p++) {
1794            if (p->as.basic.flags) {
1795                counts[BUILTIN_TYPE(p)]++;
1796            }
1797            else {
1798                freed++;
1799            }
1800        }
1801        total += objspace->heap.sorted[i]->limit;
1802    }
1803
1804    if (hash == Qnil) {
1805        hash = rb_hash_new();
1806    }
1807    else if (!RHASH_EMPTY_P(hash)) {
1808        st_foreach(RHASH_TBL(hash), set_zero, hash);
1809    }
1810    rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
1811    rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
1812
1813    for (i = 0; i <= T_MASK; i++) {
1814        VALUE type;
1815        switch (i) {
1816#define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
1817	    COUNT_TYPE(T_NONE);
1818	    COUNT_TYPE(T_OBJECT);
1819	    COUNT_TYPE(T_CLASS);
1820	    COUNT_TYPE(T_MODULE);
1821	    COUNT_TYPE(T_FLOAT);
1822	    COUNT_TYPE(T_STRING);
1823	    COUNT_TYPE(T_REGEXP);
1824	    COUNT_TYPE(T_ARRAY);
1825	    COUNT_TYPE(T_HASH);
1826	    COUNT_TYPE(T_STRUCT);
1827	    COUNT_TYPE(T_BIGNUM);
1828	    COUNT_TYPE(T_FILE);
1829	    COUNT_TYPE(T_DATA);
1830	    COUNT_TYPE(T_MATCH);
1831	    COUNT_TYPE(T_COMPLEX);
1832	    COUNT_TYPE(T_RATIONAL);
1833	    COUNT_TYPE(T_NIL);
1834	    COUNT_TYPE(T_TRUE);
1835	    COUNT_TYPE(T_FALSE);
1836	    COUNT_TYPE(T_SYMBOL);
1837	    COUNT_TYPE(T_FIXNUM);
1838	    COUNT_TYPE(T_UNDEF);
1839	    COUNT_TYPE(T_NODE);
1840	    COUNT_TYPE(T_ICLASS);
1841	    COUNT_TYPE(T_ZOMBIE);
1842#undef COUNT_TYPE
1843          default:              type = INT2NUM(i); break;
1844        }
1845        if (counts[i])
1846            rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
1847    }
1848
1849    return hash;
1850}
1851
1852
1853
1854/*
1855  ------------------------ Garbage Collection ------------------------
1856*/
1857
1858/* Sweeping */
1859
1860static VALUE
1861lazy_sweep_enable(void)
1862{
1863    rb_objspace_t *objspace = &rb_objspace;
1864
1865    objspace->flags.dont_lazy_sweep = FALSE;
1866    return Qnil;
1867}
1868
1869static void
1870gc_clear_slot_bits(struct heaps_slot *slot)
1871{
1872    memset(slot->bits, 0, HEAP_BITMAP_LIMIT * sizeof(uintptr_t));
1873}
1874
1875static size_t
1876objspace_live_num(rb_objspace_t *objspace)
1877{
1878    return objspace->total_allocated_object_num - objspace->total_freed_object_num;
1879}
1880
1881static void
1882slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
1883{
1884    size_t empty_num = 0, freed_num = 0, final_num = 0;
1885    RVALUE *p, *pend;
1886    RVALUE *final = deferred_final_list;
1887    int deferred;
1888    uintptr_t *bits;
1889
1890    p = sweep_slot->header->start; pend = p + sweep_slot->header->limit;
1891    bits = GET_HEAP_BITMAP(p);
1892    while (p < pend) {
1893        if ((!(MARKED_IN_BITMAP(bits, p))) && BUILTIN_TYPE(p) != T_ZOMBIE) {
1894            if (p->as.basic.flags) {
1895                if ((deferred = obj_free(objspace, (VALUE)p)) ||
1896                    (FL_TEST(p, FL_FINALIZE))) {
1897                    if (!deferred) {
1898                        p->as.free.flags = T_ZOMBIE;
1899                        RDATA(p)->dfree = 0;
1900                    }
1901                    p->as.free.next = deferred_final_list;
1902                    deferred_final_list = p;
1903                    assert(BUILTIN_TYPE(p) == T_ZOMBIE);
1904                    final_num++;
1905                }
1906                else {
1907                    (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
1908                    p->as.free.flags = 0;
1909                    p->as.free.next = sweep_slot->freelist;
1910                    sweep_slot->freelist = p;
1911                    freed_num++;
1912                }
1913            }
1914            else {
1915                empty_num++;
1916            }
1917        }
1918        p++;
1919    }
1920    gc_clear_slot_bits(sweep_slot);
1921    if (final_num + freed_num + empty_num == sweep_slot->header->limit &&
1922        objspace->heap.free_num > objspace->heap.do_heap_free) {
1923        RVALUE *pp;
1924
1925        for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
1926	    RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
1927            pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
1928        }
1929        sweep_slot->header->limit = final_num;
1930        unlink_heap_slot(objspace, sweep_slot);
1931    }
1932    else {
1933        if (freed_num + empty_num > 0) {
1934            link_free_heap_slot(objspace, sweep_slot);
1935        }
1936        else {
1937            sweep_slot->free_next = NULL;
1938        }
1939	objspace->heap.free_num += freed_num + empty_num;
1940    }
1941    objspace->total_freed_object_num += freed_num;
1942    objspace->heap.final_num += final_num;
1943
1944    if (deferred_final_list && !finalizing) {
1945        rb_thread_t *th = GET_THREAD();
1946        if (th) {
1947            RUBY_VM_SET_FINALIZER_INTERRUPT(th);
1948        }
1949    }
1950}
1951
1952static int
1953ready_to_gc(rb_objspace_t *objspace)
1954{
1955    if (dont_gc || during_gc) {
1956	if (!has_free_object) {
1957            if (!heaps_increment(objspace)) {
1958                set_heaps_increment(objspace);
1959                heaps_increment(objspace);
1960            }
1961	}
1962	return FALSE;
1963    }
1964    return TRUE;
1965}
1966
1967static void
1968before_gc_sweep(rb_objspace_t *objspace)
1969{
1970    objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
1971    objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT)  * 0.2);
1972    if (objspace->heap.free_min < initial_free_min) {
1973        objspace->heap.free_min = initial_free_min;
1974	if (objspace->heap.do_heap_free < initial_free_min)
1975	    objspace->heap.do_heap_free = initial_free_min;
1976    }
1977    objspace->heap.sweep_slots = heaps;
1978    objspace->heap.free_num = 0;
1979    objspace->heap.free_slots = NULL;
1980
1981    /* sweep unlinked method entries */
1982    if (GET_VM()->unlinked_method_entry_list) {
1983	rb_sweep_method_entry(GET_VM());
1984    }
1985}
1986
1987static void
1988after_gc_sweep(rb_objspace_t *objspace)
1989{
1990    size_t inc;
1991
1992    gc_prof_set_malloc_info(objspace);
1993    if (objspace->heap.free_num < objspace->heap.free_min) {
1994        set_heaps_increment(objspace);
1995        heaps_increment(objspace);
1996    }
1997
1998    inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
1999    if (inc > malloc_limit) {
2000	malloc_limit +=
2001	  (size_t)((inc - malloc_limit) * (double)objspace->heap.marked_num / (heaps_used * HEAP_OBJ_LIMIT));
2002	if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
2003    }
2004
2005    free_unused_heaps(objspace);
2006}
2007
2008static int
2009lazy_sweep(rb_objspace_t *objspace)
2010{
2011    struct heaps_slot *next;
2012
2013    heaps_increment(objspace);
2014    while (objspace->heap.sweep_slots) {
2015        next = objspace->heap.sweep_slots->next;
2016	slot_sweep(objspace, objspace->heap.sweep_slots);
2017        objspace->heap.sweep_slots = next;
2018        if (has_free_object) {
2019            during_gc = 0;
2020            return TRUE;
2021        }
2022    }
2023    return FALSE;
2024}
2025
2026static void
2027rest_sweep(rb_objspace_t *objspace)
2028{
2029    if (objspace->heap.sweep_slots) {
2030	while (objspace->heap.sweep_slots) {
2031	    lazy_sweep(objspace);
2032	}
2033	after_gc_sweep(objspace);
2034    }
2035}
2036
2037static void gc_marks(rb_objspace_t *objspace);
2038
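/*
 * Called when allocation finds no free object: first try to continue any
 * pending lazy sweep (or, if none is pending, to grow the heap), and only
 * then fall back to a full mark followed by a fresh lazy sweep.  Returns
 * TRUE once a free object is available.
 */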
2039static int
2040gc_prepare_free_objects(rb_objspace_t *objspace)
2041{
2042    int res;
2043
2044    if (objspace->flags.dont_lazy_sweep)
2045        return garbage_collect(objspace);
2046
2047
2048    if (!ready_to_gc(objspace)) return TRUE;
2049
2050    during_gc++;
2051    gc_prof_timer_start(objspace);
2052    gc_prof_sweep_timer_start(objspace);
2053
2054    if (objspace->heap.sweep_slots) {
2055        res = lazy_sweep(objspace);
2056        if (res) {
2057            gc_prof_sweep_timer_stop(objspace);
2058            gc_prof_set_malloc_info(objspace);
2059            gc_prof_timer_stop(objspace, Qfalse);
2060            return res;
2061        }
2062        after_gc_sweep(objspace);
2063    }
2064    else {
2065        if (heaps_increment(objspace)) {
2066            during_gc = 0;
2067            return TRUE;
2068        }
2069    }
2070
2071    gc_marks(objspace);
2072
2073    before_gc_sweep(objspace);
2074    if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace->heap.marked_num)) {
2075	set_heaps_increment(objspace);
2076    }
2077
2078    gc_prof_sweep_timer_start(objspace);
2079    if (!(res = lazy_sweep(objspace))) {
2080        after_gc_sweep(objspace);
2081        if (has_free_object) {
2082            res = TRUE;
2083            during_gc = 0;
2084        }
2085    }
2086    gc_prof_sweep_timer_stop(objspace);
2087
2088    gc_prof_timer_stop(objspace, Qtrue);
2089    return res;
2090}
2091
2092static void
2093gc_sweep(rb_objspace_t *objspace)
2094{
2095    struct heaps_slot *next;
2096
2097    before_gc_sweep(objspace);
2098
2099    while (objspace->heap.sweep_slots) {
2100        next = objspace->heap.sweep_slots->next;
2101	slot_sweep(objspace, objspace->heap.sweep_slots);
2102        objspace->heap.sweep_slots = next;
2103    }
2104
2105    after_gc_sweep(objspace);
2106
2107    during_gc = 0;
2108}
2109
2110/* Marking stack */
2111
2112static void push_mark_stack(mark_stack_t *, VALUE);
2113static int pop_mark_stack(mark_stack_t *, VALUE *);
2114static void shrink_stack_chunk_cache(mark_stack_t *stack);
2115
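/*
 * The mark stack is a singly linked list of fixed-size chunks of
 * STACK_CHUNK_SIZE VALUEs.  Emptied chunks are kept on a small cache list
 * and reused by push_mark_stack_chunk(); shrink_stack_chunk_cache() frees a
 * cached chunk when more than half of the cache went unused during the
 * previous marking pass.
 */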
2116static stack_chunk_t *
2117stack_chunk_alloc(void)
2118{
2119    stack_chunk_t *res;
2120
2121    res = malloc(sizeof(stack_chunk_t));
2122    if (!res)
2123        rb_memerror();
2124
2125    return res;
2126}
2127
2128static inline int
is_mark_stack_empty(mark_stack_t *stack)
2130{
2131    return stack->chunk == NULL;
2132}
2133
2134static void
2135add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
2136{
2137    chunk->next = stack->cache;
2138    stack->cache = chunk;
2139    stack->cache_size++;
2140}
2141
2142static void
2143shrink_stack_chunk_cache(mark_stack_t *stack)
2144{
2145    stack_chunk_t *chunk;
2146
2147    if (stack->unused_cache_size > (stack->cache_size/2)) {
2148        chunk = stack->cache;
2149        stack->cache = stack->cache->next;
2150        stack->cache_size--;
2151        free(chunk);
2152    }
2153    stack->unused_cache_size = stack->cache_size;
2154}
2155
2156static void
2157push_mark_stack_chunk(mark_stack_t *stack)
2158{
2159    stack_chunk_t *next;
2160
2161    assert(stack->index == stack->limit);
2162    if (stack->cache_size > 0) {
2163        next = stack->cache;
2164        stack->cache = stack->cache->next;
2165        stack->cache_size--;
2166        if (stack->unused_cache_size > stack->cache_size)
2167            stack->unused_cache_size = stack->cache_size;
2168    }
2169    else {
2170        next = stack_chunk_alloc();
2171    }
2172    next->next = stack->chunk;
2173    stack->chunk = next;
2174    stack->index = 0;
2175}
2176
2177static void
2178pop_mark_stack_chunk(mark_stack_t *stack)
2179{
2180    stack_chunk_t *prev;
2181
2182    prev = stack->chunk->next;
2183    assert(stack->index == 0);
2184    add_stack_chunk_cache(stack, stack->chunk);
2185    stack->chunk = prev;
2186    stack->index = stack->limit;
2187}
2188
2189#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
2190static void
2191free_stack_chunks(mark_stack_t *stack)
2192{
2193    stack_chunk_t *chunk = stack->chunk;
2194    stack_chunk_t *next = NULL;
2195
2196    while (chunk != NULL) {
2197        next = chunk->next;
2198        free(chunk);
2199        chunk = next;
2200    }
2201}
2202#endif
2203
2204static void
2205push_mark_stack(mark_stack_t *stack, VALUE data)
2206{
2207    if (stack->index == stack->limit) {
2208        push_mark_stack_chunk(stack);
2209    }
2210    stack->chunk->data[stack->index++] = data;
2211}
2212
2213static int
2214pop_mark_stack(mark_stack_t *stack, VALUE *data)
2215{
    if (is_mark_stack_empty(stack)) {
2217        return FALSE;
2218    }
2219    if (stack->index == 1) {
2220        *data = stack->chunk->data[--stack->index];
2221        pop_mark_stack_chunk(stack);
2222        return TRUE;
2223    }
2224    *data = stack->chunk->data[--stack->index];
2225    return TRUE;
2226}
2227
2228static void
2229init_mark_stack(mark_stack_t *stack)
2230{
2231    int i;
2232
2233    push_mark_stack_chunk(stack);
2234    stack->limit = STACK_CHUNK_SIZE;
2235
2236    for (i=0; i < 4; i++) {
2237        add_stack_chunk_cache(stack, stack_chunk_alloc());
2238    }
2239    stack->unused_cache_size = stack->cache_size;
2240}
2241
2242
2243/* Marking */
2244
2245#define MARK_IN_BITMAP(bits, p) (bits[BITMAP_INDEX(p)] = bits[BITMAP_INDEX(p)] | ((uintptr_t)1 << BITMAP_OFFSET(p)))
2246
2247
2248#ifdef __ia64
2249#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
2250#else
2251#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
2252#endif
2253
2254#define STACK_START (th->machine_stack_start)
2255#define STACK_END (th->machine_stack_end)
2256#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
2257
2258#if STACK_GROW_DIRECTION < 0
2259# define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
2260#elif STACK_GROW_DIRECTION > 0
2261# define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
2262#else
2263# define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2264			: (size_t)(STACK_END - STACK_START + 1))
2265#endif
2266#if !STACK_GROW_DIRECTION
2267int ruby_stack_grow_direction;
2268int
2269ruby_get_stack_grow_direction(volatile VALUE *addr)
2270{
2271    VALUE *end;
2272    SET_MACHINE_STACK_END(&end);
2273
2274    if (end > addr) return ruby_stack_grow_direction = 1;
2275    return ruby_stack_grow_direction = -1;
2276}
2277#endif
2278
2279size_t
2280ruby_stack_length(VALUE **p)
2281{
2282    rb_thread_t *th = GET_THREAD();
2283    SET_STACK_END;
2284    if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2285    return STACK_LENGTH;
2286}
2287
2288#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2289static int
2290stack_check(int water_mark)
2291{
2292    int ret;
2293    rb_thread_t *th = GET_THREAD();
2294    SET_STACK_END;
2295    ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
2296#ifdef __ia64
2297    if (!ret) {
2298        ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
2299              th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;
2300    }
2301#endif
2302    return ret;
2303}
2304#endif
2305
2306#define STACKFRAME_FOR_CALL_CFUNC 512
2307
2308int
2309ruby_stack_check(void)
2310{
2311#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2312    return 0;
2313#else
2314    return stack_check(STACKFRAME_FOR_CALL_CFUNC);
2315#endif
2316}
2317
2318static void
2319mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
2320{
2321    VALUE v;
2322    while (n--) {
2323        v = *x;
2324        (void)VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
2325	if (is_pointer_to_heap(objspace, (void *)v)) {
2326	    gc_mark(objspace, v);
2327	}
2328	x++;
2329    }
2330}
2331
2332static void
2333gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
2334{
2335    long n;
2336
2337    if (end <= start) return;
2338    n = end - start;
2339    mark_locations_array(objspace, start, n);
2340}
2341
2342void
2343rb_gc_mark_locations(VALUE *start, VALUE *end)
2344{
2345    gc_mark_locations(&rb_objspace, start, end);
2346}
2347
2348#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
2349
2350struct mark_tbl_arg {
2351    rb_objspace_t *objspace;
2352};
2353
2354static int
2355mark_entry(st_data_t key, st_data_t value, st_data_t data)
2356{
2357    struct mark_tbl_arg *arg = (void*)data;
2358    gc_mark(arg->objspace, (VALUE)value);
2359    return ST_CONTINUE;
2360}
2361
2362static void
2363mark_tbl(rb_objspace_t *objspace, st_table *tbl)
2364{
2365    struct mark_tbl_arg arg;
2366    if (!tbl || tbl->num_entries == 0) return;
2367    arg.objspace = objspace;
2368    st_foreach(tbl, mark_entry, (st_data_t)&arg);
2369}
2370
2371static int
2372mark_key(st_data_t key, st_data_t value, st_data_t data)
2373{
2374    struct mark_tbl_arg *arg = (void*)data;
2375    gc_mark(arg->objspace, (VALUE)key);
2376    return ST_CONTINUE;
2377}
2378
2379static void
2380mark_set(rb_objspace_t *objspace, st_table *tbl)
2381{
2382    struct mark_tbl_arg arg;
2383    if (!tbl) return;
2384    arg.objspace = objspace;
2385    st_foreach(tbl, mark_key, (st_data_t)&arg);
2386}
2387
2388void
2389rb_mark_set(st_table *tbl)
2390{
2391    mark_set(&rb_objspace, tbl);
2392}
2393
2394static int
2395mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2396{
2397    struct mark_tbl_arg *arg = (void*)data;
2398    gc_mark(arg->objspace, (VALUE)key);
2399    gc_mark(arg->objspace, (VALUE)value);
2400    return ST_CONTINUE;
2401}
2402
2403static void
2404mark_hash(rb_objspace_t *objspace, st_table *tbl)
2405{
2406    struct mark_tbl_arg arg;
2407    if (!tbl) return;
2408    arg.objspace = objspace;
2409    st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
2410}
2411
2412void
2413rb_mark_hash(st_table *tbl)
2414{
2415    mark_hash(&rb_objspace, tbl);
2416}
2417
2418static void
2419mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
2420{
2421    const rb_method_definition_t *def = me->def;
2422
2423    gc_mark(objspace, me->klass);
2424  again:
2425    if (!def) return;
2426    switch (def->type) {
2427      case VM_METHOD_TYPE_ISEQ:
2428	gc_mark(objspace, def->body.iseq->self);
2429	break;
2430      case VM_METHOD_TYPE_BMETHOD:
2431	gc_mark(objspace, def->body.proc);
2432	break;
2433      case VM_METHOD_TYPE_ATTRSET:
2434      case VM_METHOD_TYPE_IVAR:
2435	gc_mark(objspace, def->body.attr.location);
2436	break;
2437      case VM_METHOD_TYPE_REFINED:
2438	if (def->body.orig_me) {
2439	    def = def->body.orig_me->def;
2440	    goto again;
2441	}
2442	break;
2443      default:
2444	break; /* ignore */
2445    }
2446}
2447
2448void
2449rb_mark_method_entry(const rb_method_entry_t *me)
2450{
2451    mark_method_entry(&rb_objspace, me);
2452}
2453
2454static int
2455mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
2456{
2457    struct mark_tbl_arg *arg = (void*)data;
2458    mark_method_entry(arg->objspace, me);
2459    return ST_CONTINUE;
2460}
2461
2462static void
2463mark_m_tbl(rb_objspace_t *objspace, st_table *tbl)
2464{
2465    struct mark_tbl_arg arg;
2466    if (!tbl) return;
2467    arg.objspace = objspace;
2468    st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
2469}
2470
2471static int
2472mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
2473{
2474    struct mark_tbl_arg *arg = (void*)data;
2475    gc_mark(arg->objspace, ce->value);
2476    gc_mark(arg->objspace, ce->file);
2477    return ST_CONTINUE;
2478}
2479
2480static void
2481mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
2482{
2483    struct mark_tbl_arg arg;
2484    if (!tbl) return;
2485    arg.objspace = objspace;
2486    st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
2487}
2488
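/*
 * GET_STACK_BOUNDS orders (start, end) so that start <= end regardless of
 * which way the machine stack grows; `appendix` extends the growing end by
 * that many slots (1 when scanning the currently running context below,
 * 0 in rb_gc_mark_machine_stack).
 */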
2489#if STACK_GROW_DIRECTION < 0
2490#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2491#elif STACK_GROW_DIRECTION > 0
2492#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2493#else
2494#define GET_STACK_BOUNDS(start, end, appendix) \
2495    ((STACK_END < STACK_START) ? \
2496     ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2497#endif
2498
2499#define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))
2500
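/*
 * Conservative scan of the running thread's context: rb_setjmp() spills the
 * CPU registers into save_regs_gc_mark, which is then scanned for values
 * that look like heap pointers, followed by the machine stack between
 * stack_start and stack_end.
 */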
2501static void
2502mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
2503{
2504    union {
2505	rb_jmp_buf j;
2506	VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
2507    } save_regs_gc_mark;
2508    VALUE *stack_start, *stack_end;
2509
2510    FLUSH_REGISTER_WINDOWS;
2511    /* This assumes that all registers are saved into the jmp_buf (and stack) */
2512    rb_setjmp(save_regs_gc_mark.j);
2513
2514    SET_STACK_END;
2515    GET_STACK_BOUNDS(stack_start, stack_end, 1);
2516
2517    mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
2518
2519    rb_gc_mark_locations(stack_start, stack_end);
2520#ifdef __ia64
2521    rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
2522#endif
2523#if defined(__mc68000__)
2524    mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
2525			 (STACK_START - STACK_END));
2526#endif
2527}
2528
2529void
2530rb_gc_mark_machine_stack(rb_thread_t *th)
2531{
2532    rb_objspace_t *objspace = &rb_objspace;
2533    VALUE *stack_start, *stack_end;
2534
2535    GET_STACK_BOUNDS(stack_start, stack_end, 0);
2536    rb_gc_mark_locations(stack_start, stack_end);
2537#ifdef __ia64
2538    rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
2539#endif
2540}
2541
2542void
2543rb_mark_tbl(st_table *tbl)
2544{
2545    mark_tbl(&rb_objspace, tbl);
2546}
2547
2548void
2549rb_gc_mark_maybe(VALUE obj)
2550{
2551    if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
2552	gc_mark(&rb_objspace, obj);
2553    }
2554}
2555
2556static int
2557gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
2558{
2559    register uintptr_t *bits = GET_HEAP_BITMAP(ptr);
2560    if (MARKED_IN_BITMAP(bits, ptr)) return 0;
2561    MARK_IN_BITMAP(bits, ptr);
2562    objspace->heap.marked_num++;
2563    return 1;
2564}
2565
2566static int
2567markable_object_p(rb_objspace_t *objspace, VALUE ptr)
2568{
2569    register RVALUE *obj = RANY(ptr);
2570
2571    if (rb_special_const_p(ptr)) return 0; /* special const not marked */
    if (obj->as.basic.flags == 0) return 0;        /* free cell */
2573
2574    return 1;
2575}
2576
2577int
2578rb_objspace_markable_object_p(VALUE obj)
2579{
    return markable_object_p(/* objspace is not used at present */ 0, obj);
2581}
2582
2583static void
2584gc_mark(rb_objspace_t *objspace, VALUE ptr)
2585{
2586    if (!markable_object_p(objspace, ptr)) {
2587	return;
2588    }
2589
2590    if (LIKELY(objspace->mark_func_data == 0)) {
2591	if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
2592	push_mark_stack(&objspace->mark_stack, ptr);
2593    }
2594    else {
2595	objspace->mark_func_data->mark_func(ptr, objspace->mark_func_data->data);
2596    }
2597}
2598
2599void
2600rb_gc_mark(VALUE ptr)
2601{
2602    gc_mark(&rb_objspace, ptr);
2603}
2604
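/*
 * Mark the objects directly referenced by ptr.  Most children are pushed
 * onto the mark stack via gc_mark(); the last reference of each object is
 * instead followed in place ("ptr = ...; goto again;") to keep the mark
 * stack shallow.
 */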
2605static void
2606gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
2607{
2608    register RVALUE *obj = RANY(ptr);
2609
    goto marking;		/* skip entry checks: the caller has already validated ptr */
2611
2612  again:
2613    if (LIKELY(objspace->mark_func_data == 0)) {
2614	obj = RANY(ptr);
2615	if (!markable_object_p(objspace, ptr)) return;
2616	if (!gc_mark_ptr(objspace, ptr)) return;  /* already marked */
2617    }
2618    else {
2619	gc_mark(objspace, ptr);
2620	return;
2621    }
2622
2623  marking:
2624    if (FL_TEST(obj, FL_EXIVAR)) {
2625	rb_mark_generic_ivar(ptr);
2626    }
2627
2628    switch (BUILTIN_TYPE(obj)) {
2629      case T_NIL:
2630      case T_FIXNUM:
2631	rb_bug("rb_gc_mark() called for broken object");
2632	break;
2633
2634      case T_NODE:
2635	switch (nd_type(obj)) {
2636	  case NODE_IF:		/* 1,2,3 */
2637	  case NODE_FOR:
2638	  case NODE_ITER:
2639	  case NODE_WHEN:
2640	  case NODE_MASGN:
2641	  case NODE_RESCUE:
2642	  case NODE_RESBODY:
2643	  case NODE_CLASS:
2644	  case NODE_BLOCK_PASS:
2645	    gc_mark(objspace, (VALUE)obj->as.node.u2.node);
2646	    /* fall through */
2647	  case NODE_BLOCK:	/* 1,3 */
2648	  case NODE_ARRAY:
2649	  case NODE_DSTR:
2650	  case NODE_DXSTR:
2651	  case NODE_DREGX:
2652	  case NODE_DREGX_ONCE:
2653	  case NODE_ENSURE:
2654	  case NODE_CALL:
2655	  case NODE_DEFS:
2656	  case NODE_OP_ASGN1:
2657	    gc_mark(objspace, (VALUE)obj->as.node.u1.node);
2658	    /* fall through */
2659	  case NODE_SUPER:	/* 3 */
2660	  case NODE_FCALL:
2661	  case NODE_DEFN:
2662	  case NODE_ARGS_AUX:
2663	    ptr = (VALUE)obj->as.node.u3.node;
2664	    goto again;
2665
2666	  case NODE_WHILE:	/* 1,2 */
2667	  case NODE_UNTIL:
2668	  case NODE_AND:
2669	  case NODE_OR:
2670	  case NODE_CASE:
2671	  case NODE_SCLASS:
2672	  case NODE_DOT2:
2673	  case NODE_DOT3:
2674	  case NODE_FLIP2:
2675	  case NODE_FLIP3:
2676	  case NODE_MATCH2:
2677	  case NODE_MATCH3:
2678	  case NODE_OP_ASGN_OR:
2679	  case NODE_OP_ASGN_AND:
2680	  case NODE_MODULE:
2681	  case NODE_ALIAS:
2682	  case NODE_VALIAS:
2683	  case NODE_ARGSCAT:
2684	    gc_mark(objspace, (VALUE)obj->as.node.u1.node);
2685	    /* fall through */
2686	  case NODE_GASGN:	/* 2 */
2687	  case NODE_LASGN:
2688	  case NODE_DASGN:
2689	  case NODE_DASGN_CURR:
2690	  case NODE_IASGN:
2691	  case NODE_IASGN2:
2692	  case NODE_CVASGN:
2693	  case NODE_COLON3:
2694	  case NODE_OPT_N:
2695	  case NODE_EVSTR:
2696	  case NODE_UNDEF:
2697	  case NODE_POSTEXE:
2698	    ptr = (VALUE)obj->as.node.u2.node;
2699	    goto again;
2700
2701	  case NODE_HASH:	/* 1 */
2702	  case NODE_LIT:
2703	  case NODE_STR:
2704	  case NODE_XSTR:
2705	  case NODE_DEFINED:
2706	  case NODE_MATCH:
2707	  case NODE_RETURN:
2708	  case NODE_BREAK:
2709	  case NODE_NEXT:
2710	  case NODE_YIELD:
2711	  case NODE_COLON2:
2712	  case NODE_SPLAT:
2713	  case NODE_TO_ARY:
2714	    ptr = (VALUE)obj->as.node.u1.node;
2715	    goto again;
2716
2717	  case NODE_SCOPE:	/* 2,3 */
2718	  case NODE_CDECL:
2719	  case NODE_OPT_ARG:
2720	    gc_mark(objspace, (VALUE)obj->as.node.u3.node);
2721	    ptr = (VALUE)obj->as.node.u2.node;
2722	    goto again;
2723
2724	  case NODE_ARGS:	/* custom */
2725	    {
2726		struct rb_args_info *args = obj->as.node.u3.args;
2727		if (args) {
2728		    if (args->pre_init)    gc_mark(objspace, (VALUE)args->pre_init);
2729		    if (args->post_init)   gc_mark(objspace, (VALUE)args->post_init);
2730		    if (args->opt_args)    gc_mark(objspace, (VALUE)args->opt_args);
2731		    if (args->kw_args)     gc_mark(objspace, (VALUE)args->kw_args);
2732		    if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg);
2733		}
2734	    }
2735	    ptr = (VALUE)obj->as.node.u2.node;
2736	    goto again;
2737
2738	  case NODE_ZARRAY:	/* - */
2739	  case NODE_ZSUPER:
2740	  case NODE_VCALL:
2741	  case NODE_GVAR:
2742	  case NODE_LVAR:
2743	  case NODE_DVAR:
2744	  case NODE_IVAR:
2745	  case NODE_CVAR:
2746	  case NODE_NTH_REF:
2747	  case NODE_BACK_REF:
2748	  case NODE_REDO:
2749	  case NODE_RETRY:
2750	  case NODE_SELF:
2751	  case NODE_NIL:
2752	  case NODE_TRUE:
2753	  case NODE_FALSE:
2754	  case NODE_ERRINFO:
2755	  case NODE_BLOCK_ARG:
2756	    break;
2757	  case NODE_ALLOCA:
2758	    mark_locations_array(objspace,
2759				 (VALUE*)obj->as.node.u1.value,
2760				 obj->as.node.u3.cnt);
2761	    gc_mark(objspace, (VALUE)obj->as.node.u2.node);
2762	    break;
2763
2764	  case NODE_CREF:
2765	    gc_mark(objspace, obj->as.node.nd_refinements);
2766	    gc_mark(objspace, (VALUE)obj->as.node.u1.node);
2767	    ptr = (VALUE)obj->as.node.u3.node;
2768	    goto again;
2769
2770	  default:		/* unlisted NODE */
2771	    if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
2772		gc_mark(objspace, (VALUE)obj->as.node.u1.node);
2773	    }
2774	    if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
2775		gc_mark(objspace, (VALUE)obj->as.node.u2.node);
2776	    }
2777	    if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
2778		gc_mark(objspace, (VALUE)obj->as.node.u3.node);
2779	    }
2780	}
2781	return;			/* no need to mark class. */
2782    }
2783
2784    gc_mark(objspace, obj->as.basic.klass);
2785    switch (BUILTIN_TYPE(obj)) {
2786      case T_ICLASS:
2787      case T_CLASS:
2788      case T_MODULE:
2789	mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2790	if (!RCLASS_EXT(obj)) break;
2791	mark_tbl(objspace, RCLASS_IV_TBL(obj));
2792	mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
2793	ptr = RCLASS_SUPER(obj);
2794	goto again;
2795
2796      case T_ARRAY:
2797	if (FL_TEST(obj, ELTS_SHARED)) {
2798	    ptr = obj->as.array.as.heap.aux.shared;
2799	    goto again;
2800	}
2801	else {
2802	    long i, len = RARRAY_LEN(obj);
2803	    VALUE *ptr = RARRAY_PTR(obj);
2804	    for (i=0; i < len; i++) {
2805		gc_mark(objspace, *ptr++);
2806	    }
2807	}
2808	break;
2809
2810      case T_HASH:
2811	mark_hash(objspace, obj->as.hash.ntbl);
2812	ptr = obj->as.hash.ifnone;
2813	goto again;
2814
2815      case T_STRING:
2816#define STR_ASSOC FL_USER3   /* copied from string.c */
2817	if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
2818	    ptr = obj->as.string.as.heap.aux.shared;
2819	    goto again;
2820	}
2821	break;
2822
2823      case T_DATA:
2824	if (RTYPEDDATA_P(obj)) {
2825	    RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
2826	    if (mark_func) (*mark_func)(DATA_PTR(obj));
2827	}
2828	else {
2829	    if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
2830	}
2831	break;
2832
2833      case T_OBJECT:
2834        {
2835            long i, len = ROBJECT_NUMIV(obj);
2836	    VALUE *ptr = ROBJECT_IVPTR(obj);
2837            for (i  = 0; i < len; i++) {
2838		gc_mark(objspace, *ptr++);
2839            }
2840        }
2841	break;
2842
2843      case T_FILE:
2844        if (obj->as.file.fptr) {
2845            gc_mark(objspace, obj->as.file.fptr->pathv);
2846            gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
2847            gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
2848            gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
2849            gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
2850            gc_mark(objspace, obj->as.file.fptr->write_lock);
2851        }
2852        break;
2853
2854      case T_REGEXP:
2855        ptr = obj->as.regexp.src;
2856        goto again;
2857
2858      case T_FLOAT:
2859      case T_BIGNUM:
2860      case T_ZOMBIE:
2861	break;
2862
2863      case T_MATCH:
2864	gc_mark(objspace, obj->as.match.regexp);
2865	if (obj->as.match.str) {
2866	    ptr = obj->as.match.str;
2867	    goto again;
2868	}
2869	break;
2870
2871      case T_RATIONAL:
2872	gc_mark(objspace, obj->as.rational.num);
2873	ptr = obj->as.rational.den;
2874	goto again;
2875
2876      case T_COMPLEX:
2877	gc_mark(objspace, obj->as.complex.real);
2878	ptr = obj->as.complex.imag;
2879	goto again;
2880
2881      case T_STRUCT:
2882	{
2883	    long len = RSTRUCT_LEN(obj);
2884	    VALUE *ptr = RSTRUCT_PTR(obj);
2885
2886	    while (len--) {
2887		gc_mark(objspace, *ptr++);
2888	    }
2889	}
2890	break;
2891
2892      default:
2893	rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
2894	       BUILTIN_TYPE(obj), (void *)obj,
2895	       is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
2896    }
2897}
2898
2899static void
2900gc_mark_stacked_objects(rb_objspace_t *objspace)
2901{
2902    mark_stack_t *mstack = &objspace->mark_stack;
2903    VALUE obj = 0;
2904
2905    if (!mstack->index) return;
2906    while (pop_mark_stack(mstack, &obj)) {
2907        gc_mark_children(objspace, obj);
2908    }
2909    shrink_stack_chunk_cache(mstack);
2910}
2911
2912static void
2913gc_marks(rb_objspace_t *objspace)
2914{
2915    struct gc_list *list;
2916    rb_thread_t *th = GET_THREAD();
2917    struct mark_func_data_struct *prev_mark_func_data;
2918
2919    prev_mark_func_data = objspace->mark_func_data;
2920    objspace->mark_func_data = 0;
2921
2922    gc_prof_mark_timer_start(objspace);
2923    objspace->heap.marked_num = 0;
2924    objspace->count++;
2925
2926    SET_STACK_END;
2927
2928    th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
2929
2930    mark_tbl(objspace, finalizer_table);
2931    mark_current_machine_context(objspace, th);
2932
2933    rb_gc_mark_symbols();
2934    rb_gc_mark_encodings();
2935
2936    /* mark protected global variables */
2937    for (list = global_List; list; list = list->next) {
2938	rb_gc_mark_maybe(*list->varptr);
2939    }
2940    rb_mark_end_proc();
2941    rb_gc_mark_global_tbl();
2942
2943    mark_tbl(objspace, rb_class_tbl);
2944
2945    /* mark generic instance variables for special constants */
2946    rb_mark_generic_ivar_tbl();
2947
2948    rb_gc_mark_parser();
2949
2950    rb_gc_mark_unlinked_live_method_entries(th->vm);
2951
2952    /* marking-loop */
2953    gc_mark_stacked_objects(objspace);
2954
2955    gc_prof_mark_timer_stop(objspace);
2956
2957    objspace->mark_func_data = prev_mark_func_data;
2958}
2959
2960/* GC */
2961
2962void
2963rb_gc_force_recycle(VALUE p)
2964{
2965    rb_objspace_t *objspace = &rb_objspace;
2966    struct heaps_slot *slot;
2967
2968    objspace->total_freed_object_num++;
2969    if (MARKED_IN_BITMAP(GET_HEAP_BITMAP(p), p)) {
2970        add_slot_local_freelist(objspace, (RVALUE *)p);
2971    }
2972    else {
2973	objspace->heap.free_num++;
2974        slot = add_slot_local_freelist(objspace, (RVALUE *)p);
2975        if (slot->free_next == NULL) {
2976            link_free_heap_slot(objspace, slot);
2977        }
2978    }
2979}
2980
2981void
2982rb_gc_register_mark_object(VALUE obj)
2983{
2984    VALUE ary = GET_THREAD()->vm->mark_object_ary;
2985    rb_ary_push(ary, obj);
2986}
2987
2988void
2989rb_gc_register_address(VALUE *addr)
2990{
2991    rb_objspace_t *objspace = &rb_objspace;
2992    struct gc_list *tmp;
2993
2994    tmp = ALLOC(struct gc_list);
2995    tmp->next = global_List;
2996    tmp->varptr = addr;
2997    global_List = tmp;
2998}
2999
3000void
3001rb_gc_unregister_address(VALUE *addr)
3002{
3003    rb_objspace_t *objspace = &rb_objspace;
3004    struct gc_list *tmp = global_List;
3005
3006    if (tmp->varptr == addr) {
3007	global_List = tmp->next;
3008	xfree(tmp);
3009	return;
3010    }
3011    while (tmp->next) {
3012	if (tmp->next->varptr == addr) {
3013	    struct gc_list *t = tmp->next;
3014
3015	    tmp->next = tmp->next->next;
3016	    xfree(t);
3017	    break;
3018	}
3019	tmp = tmp->next;
3020    }
3021}
3022
3023#define GC_NOTIFY 0
3024
3025static int
3026garbage_collect(rb_objspace_t *objspace)
3027{
3028    if (GC_NOTIFY) printf("start garbage_collect()\n");
3029
3030    if (!heaps) {
3031	return FALSE;
3032    }
3033    if (!ready_to_gc(objspace)) {
3034        return TRUE;
3035    }
3036
3037    gc_prof_timer_start(objspace);
3038
3039    rest_sweep(objspace);
3040
3041    during_gc++;
3042    gc_marks(objspace);
3043
3044    gc_prof_sweep_timer_start(objspace);
3045    gc_sweep(objspace);
3046    gc_prof_sweep_timer_stop(objspace);
3047
3048    gc_prof_timer_stop(objspace, Qtrue);
3049    if (GC_NOTIFY) printf("end garbage_collect()\n");
3050    return TRUE;
3051}
3052
3053static void *
3054gc_with_gvl(void *ptr)
3055{
3056    return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
3057}
3058
3059static int
3060garbage_collect_with_gvl(rb_objspace_t *objspace)
3061{
3062    if (dont_gc) return TRUE;
3063    if (ruby_thread_has_gvl_p()) {
3064	return garbage_collect(objspace);
3065    }
3066    else {
3067	if (ruby_native_thread_p()) {
3068	    return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
3069	}
3070	else {
3071	    /* no ruby thread */
3072	    fprintf(stderr, "[FATAL] failed to allocate memory\n");
3073	    exit(EXIT_FAILURE);
3074	}
3075    }
3076}
3077
3078int
3079rb_garbage_collect(void)
3080{
3081    return garbage_collect(&rb_objspace);
3082}
3083
3084#undef Init_stack
3085
3086void
3087Init_stack(volatile VALUE *addr)
3088{
3089    ruby_init_stack(addr);
3090}
3091
3092/*
3093 *  call-seq:
3094 *     GC.start                     -> nil
3095 *     gc.garbage_collect           -> nil
3096 *     ObjectSpace.garbage_collect  -> nil
3097 *
3098 *  Initiates garbage collection, unless manually disabled.
3099 *
3100 */
3101
3102VALUE
3103rb_gc_start(void)
3104{
3105    rb_gc();
3106    return Qnil;
3107}
3108
3109void
3110rb_gc(void)
3111{
3112    rb_objspace_t *objspace = &rb_objspace;
3113    garbage_collect(objspace);
3114    if (!finalizing) finalize_deferred(objspace);
3115    free_unused_heaps(objspace);
3116}
3117
3118int
3119rb_during_gc(void)
3120{
3121    rb_objspace_t *objspace = &rb_objspace;
3122    return during_gc;
3123}
3124
/*
 *  call-seq:
 *     GC.count -> Integer
 *
 *  Returns the number of times GC has occurred since the process started.
 *
 */
3134
3135static VALUE
3136gc_count(VALUE self)
3137{
3138    return UINT2NUM(rb_objspace.count);
3139}
3140
3141/*
3142 *  call-seq:
3143 *     GC.stat -> Hash
3144 *
3145 *  Returns a Hash containing information about the GC.
3146 *
 *  The hash includes internal GC statistics such as:
3148 *
3149 *	{
3150 *	    :count=>0,
3151 *	    :heap_used=>12,
3152 *     	    :heap_length=>12,
3153 *     	    :heap_increment=>0,
3154 *     	    :heap_live_num=>7539,
3155 *     	    :heap_free_num=>88,
3156 *     	    :heap_final_num=>0,
3157 *     	    :total_allocated_object=>7630,
3158 *     	    :total_freed_object=>88
3159 *	}
3160 *
3161 *  The contents of the hash are implementation specific and may be changed in
3162 *  the future.
3163 *
3164 *  This method is only expected to work on C Ruby.
3165 *
3166 */
3167
3168static VALUE
3169gc_stat(int argc, VALUE *argv, VALUE self)
3170{
3171    rb_objspace_t *objspace = &rb_objspace;
3172    VALUE hash;
3173    static VALUE sym_count;
3174    static VALUE sym_heap_used, sym_heap_length, sym_heap_increment;
3175    static VALUE sym_heap_live_num, sym_heap_free_num, sym_heap_final_num;
3176    static VALUE sym_total_allocated_object, sym_total_freed_object;
3177    if (sym_count == 0) {
3178	sym_count = ID2SYM(rb_intern_const("count"));
3179	sym_heap_used = ID2SYM(rb_intern_const("heap_used"));
3180	sym_heap_length = ID2SYM(rb_intern_const("heap_length"));
3181	sym_heap_increment = ID2SYM(rb_intern_const("heap_increment"));
3182	sym_heap_live_num = ID2SYM(rb_intern_const("heap_live_num"));
3183	sym_heap_free_num = ID2SYM(rb_intern_const("heap_free_num"));
3184	sym_heap_final_num = ID2SYM(rb_intern_const("heap_final_num"));
3185	sym_total_allocated_object = ID2SYM(rb_intern_const("total_allocated_object"));
3186	sym_total_freed_object = ID2SYM(rb_intern_const("total_freed_object"));
3187    }
3188
3189    if (rb_scan_args(argc, argv, "01", &hash) == 1) {
3190        if (!RB_TYPE_P(hash, T_HASH))
3191            rb_raise(rb_eTypeError, "non-hash given");
3192    }
3193
3194    if (hash == Qnil) {
3195        hash = rb_hash_new();
3196    }
3197
3198    rest_sweep(objspace);
3199
3200    rb_hash_aset(hash, sym_count, SIZET2NUM(objspace->count));
3201    /* implementation dependent counters */
3202    rb_hash_aset(hash, sym_heap_used, SIZET2NUM(objspace->heap.used));
3203    rb_hash_aset(hash, sym_heap_length, SIZET2NUM(objspace->heap.length));
3204    rb_hash_aset(hash, sym_heap_increment, SIZET2NUM(objspace->heap.increment));
3205    rb_hash_aset(hash, sym_heap_live_num, SIZET2NUM(objspace_live_num(objspace)));
3206    rb_hash_aset(hash, sym_heap_free_num, SIZET2NUM(objspace->heap.free_num));
3207    rb_hash_aset(hash, sym_heap_final_num, SIZET2NUM(objspace->heap.final_num));
3208    rb_hash_aset(hash, sym_total_allocated_object, SIZET2NUM(objspace->total_allocated_object_num));
3209    rb_hash_aset(hash, sym_total_freed_object, SIZET2NUM(objspace->total_freed_object_num));
3210
3211    return hash;
3212}
3213
3214/*
3215 *  call-seq:
3216 *    GC.stress	    -> true or false
3217 *
 *  Returns the current status of GC stress mode.
3219 */
3220
3221static VALUE
3222gc_stress_get(VALUE self)
3223{
3224    rb_objspace_t *objspace = &rb_objspace;
3225    return ruby_gc_stress ? Qtrue : Qfalse;
3226}
3227
3228/*
3229 *  call-seq:
3230 *    GC.stress = bool          -> bool
3231 *
3232 *  Updates the GC stress mode.
3233 *
3234 *  When stress mode is enabled, the GC is invoked at every GC opportunity:
3235 *  all memory and object allocations.
3236 *
 *  Enabling stress mode degrades performance; it is intended only for debugging.
3238 */
3239
3240static VALUE
3241gc_stress_set(VALUE self, VALUE flag)
3242{
3243    rb_objspace_t *objspace = &rb_objspace;
3244    rb_secure(2);
3245    ruby_gc_stress = RTEST(flag);
3246    return flag;
3247}
3248
3249/*
3250 *  call-seq:
3251 *     GC.enable    -> true or false
3252 *
3253 *  Enables garbage collection, returning +true+ if garbage
3254 *  collection was previously disabled.
3255 *
3256 *     GC.disable   #=> false
3257 *     GC.enable    #=> true
3258 *     GC.enable    #=> false
3259 *
3260 */
3261
3262VALUE
3263rb_gc_enable(void)
3264{
3265    rb_objspace_t *objspace = &rb_objspace;
3266    int old = dont_gc;
3267
3268    dont_gc = FALSE;
3269    return old ? Qtrue : Qfalse;
3270}
3271
3272/*
3273 *  call-seq:
3274 *     GC.disable    -> true or false
3275 *
3276 *  Disables garbage collection, returning +true+ if garbage
3277 *  collection was already disabled.
3278 *
3279 *     GC.disable   #=> false
3280 *     GC.disable   #=> true
3281 *
3282 */
3283
3284VALUE
3285rb_gc_disable(void)
3286{
3287    rb_objspace_t *objspace = &rb_objspace;
3288    int old = dont_gc;
3289
3290    rest_sweep(objspace);
3291
3292    dont_gc = TRUE;
3293    return old ? Qtrue : Qfalse;
3294}
3295
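/*
 * Read GC tuning parameters from the environment at startup, for example
 * (illustrative values only):
 *
 *   RUBY_GC_MALLOC_LIMIT=16000000 RUBY_FREE_MIN=8192 ruby app.rb
 *
 * The settings are ignored when running with a safe level above 0.
 */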
3296void
3297rb_gc_set_params(void)
3298{
3299    char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;
3300
3301    if (rb_safe_level() > 0) return;
3302
3303    malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
3304    if (malloc_limit_ptr != NULL) {
3305	int malloc_limit_i = atoi(malloc_limit_ptr);
3306	if (RTEST(ruby_verbose))
3307	    fprintf(stderr, "malloc_limit=%d (%d)\n",
3308		    malloc_limit_i, initial_malloc_limit);
3309	if (malloc_limit_i > 0) {
3310	    initial_malloc_limit = malloc_limit_i;
3311	}
3312    }
3313
3314    heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
3315    if (heap_min_slots_ptr != NULL) {
3316	int heap_min_slots_i = atoi(heap_min_slots_ptr);
3317	if (RTEST(ruby_verbose))
3318	    fprintf(stderr, "heap_min_slots=%d (%d)\n",
3319		    heap_min_slots_i, initial_heap_min_slots);
3320	if (heap_min_slots_i > 0) {
3321	    initial_heap_min_slots = heap_min_slots_i;
3322            initial_expand_heap(&rb_objspace);
3323	}
3324    }
3325
3326    free_min_ptr = getenv("RUBY_FREE_MIN");
3327    if (free_min_ptr != NULL) {
3328	int free_min_i = atoi(free_min_ptr);
3329	if (RTEST(ruby_verbose))
3330	    fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
3331	if (free_min_i > 0) {
3332	    initial_free_min = free_min_i;
3333	}
3334    }
3335}
3336
3337void
3338rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
3339{
3340    rb_objspace_t *objspace = &rb_objspace;
3341
3342    if (markable_object_p(objspace, obj)) {
3343	struct mark_func_data_struct mfd;
3344	mfd.mark_func = func;
3345	mfd.data = data;
3346	objspace->mark_func_data = &mfd;
3347	gc_mark_children(objspace, obj);
3348	objspace->mark_func_data = 0;
3349    }
3350}
3351
3352/*
3353  ------------------------ Extended allocator ------------------------
3354*/
3355
3356static void vm_xfree(rb_objspace_t *objspace, void *ptr);
3357
3358static void *
3359negative_size_allocation_error_with_gvl(void *ptr)
3360{
3361    rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
3362    return 0; /* should not be reached */
3363}
3364
3365static void
3366negative_size_allocation_error(const char *msg)
3367{
3368    if (ruby_thread_has_gvl_p()) {
3369	rb_raise(rb_eNoMemError, "%s", msg);
3370    }
3371    else {
3372	if (ruby_native_thread_p()) {
3373	    rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
3374	}
3375	else {
3376	    fprintf(stderr, "[FATAL] %s\n", msg);
3377	    exit(EXIT_FAILURE);
3378	}
3379    }
3380}
3381
3382static void *
3383ruby_memerror_body(void *dummy)
3384{
3385    rb_memerror();
3386    return 0;
3387}
3388
3389static void
3390ruby_memerror(void)
3391{
3392    if (ruby_thread_has_gvl_p()) {
3393	rb_memerror();
3394    }
3395    else {
3396	if (ruby_native_thread_p()) {
3397	    rb_thread_call_with_gvl(ruby_memerror_body, 0);
3398	}
3399	else {
3400	    /* no ruby thread */
3401	    fprintf(stderr, "[FATAL] failed to allocate memory\n");
3402	    exit(EXIT_FAILURE);
3403	}
3404    }
3405}
3406
3407void
3408rb_memerror(void)
3409{
3410    rb_thread_t *th = GET_THREAD();
3411    if (!nomem_error ||
3412	(rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
3413	fprintf(stderr, "[FATAL] failed to allocate memory\n");
3414	exit(EXIT_FAILURE);
3415    }
3416    if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
3417	rb_thread_raised_clear(th);
3418	GET_THREAD()->errinfo = nomem_error;
3419	JUMP_TAG(TAG_RAISE);
3420    }
3421    rb_thread_raised_set(th, RAISED_NOMEMORY);
3422    rb_exc_raise(nomem_error);
3423}
3424
3425static void *
3426aligned_malloc(size_t alignment, size_t size)
3427{
3428    void *res;
3429
3430#if defined __MINGW32__
3431    res = __mingw_aligned_malloc(size, alignment);
3432#elif defined _WIN32 && !defined __CYGWIN__
3433    res = _aligned_malloc(size, alignment);
3434#elif defined(HAVE_POSIX_MEMALIGN)
3435    if (posix_memalign(&res, alignment, size) == 0) {
3436        return res;
3437    }
3438    else {
3439        return NULL;
3440    }
3441#elif defined(HAVE_MEMALIGN)
3442    res = memalign(alignment, size);
3443#else
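    /*
     * Portable fallback: over-allocate by alignment + sizeof(void*), round
     * the interior pointer down to the requested alignment, and stash the
     * original malloc() pointer just below it so aligned_free() can recover
     * and free it.
     */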
3444    char* aligned;
3445    res = malloc(alignment + size + sizeof(void*));
3446    aligned = (char*)res + alignment + sizeof(void*);
3447    aligned -= ((VALUE)aligned & (alignment - 1));
3448    ((void**)aligned)[-1] = res;
3449    res = (void*)aligned;
3450#endif
3451
3452#if defined(_DEBUG) || defined(GC_DEBUG)
3453    /* alignment must be a power of 2 */
    assert(((alignment - 1) & alignment) == 0);
3455    assert(alignment % sizeof(void*) == 0);
3456#endif
3457    return res;
3458}
3459
3460static void
3461aligned_free(void *ptr)
3462{
3463#if defined __MINGW32__
3464    __mingw_aligned_free(ptr);
3465#elif defined _WIN32 && !defined __CYGWIN__
3466    _aligned_free(ptr);
3467#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
3468    free(ptr);
3469#else
3470    free(((void**)ptr)[-1]);
3471#endif
3472}
3473
3474static inline size_t
3475vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
3476{
3477    if ((ssize_t)size < 0) {
3478	negative_size_allocation_error("negative allocation size (or too big)");
3479    }
3480    if (size == 0) size = 1;
3481
3482#if CALC_EXACT_MALLOC_SIZE
3483    size += sizeof(size_t);
3484#endif
3485
3486    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
3487	(malloc_increase+size) > malloc_limit) {
3488	garbage_collect_with_gvl(objspace);
3489    }
3490
3491    return size;
3492}
3493
3494static inline void *
3495vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
3496{
3497    ATOMIC_SIZE_ADD(malloc_increase, size);
3498
3499#if CALC_EXACT_MALLOC_SIZE
3500    ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size);
3501    ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
3502    ((size_t *)mem)[0] = size;
3503    mem = (size_t *)mem + 1;
3504#endif
3505
3506    return mem;
3507}
3508
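/*
 * Evaluate the allocation expression; if it yields NULL, run the GC once
 * (acquiring the GVL if necessary) and retry.  Only when the retry also
 * fails is NoMemoryError raised via ruby_memerror().
 */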
3509#define TRY_WITH_GC(alloc) do { \
3510	if (!(alloc) && \
3511	    (!garbage_collect_with_gvl(objspace) || \
3512	     !(alloc))) { \
3513	    ruby_memerror(); \
3514	} \
3515    } while (0)
3516
3517static void *
3518vm_xmalloc(rb_objspace_t *objspace, size_t size)
3519{
3520    void *mem;
3521
3522    size = vm_malloc_prepare(objspace, size);
3523    TRY_WITH_GC(mem = malloc(size));
3524    return vm_malloc_fixup(objspace, mem, size);
3525}
3526
3527static void *
3528vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
3529{
3530    void *mem;
3531#if CALC_EXACT_MALLOC_SIZE
3532    size_t oldsize;
3533#endif
3534
3535    if ((ssize_t)size < 0) {
3536	negative_size_allocation_error("negative re-allocation size");
3537    }
3538
3539    if (!ptr) return vm_xmalloc(objspace, size);
3540
3541    /*
3542     * The behavior of realloc(ptr, 0) is implementation defined.
     * Therefore we don't use realloc(ptr, 0) for portability reasons.
3544     * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
3545     */
3546    if (size == 0) {
3547	vm_xfree(objspace, ptr);
3548	return 0;
3549    }
3550    if (ruby_gc_stress && !ruby_disable_gc_stress)
3551	garbage_collect_with_gvl(objspace);
3552
3553#if CALC_EXACT_MALLOC_SIZE
3554    size += sizeof(size_t);
3555    ptr = (size_t *)ptr - 1;
3556    oldsize = ((size_t *)ptr)[0];
3557#endif
3558
3559    mem = realloc(ptr, size);
3560    if (!mem) {
3561	if (garbage_collect_with_gvl(objspace)) {
3562	    mem = realloc(ptr, size);
3563	}
3564	if (!mem) {
3565	    ruby_memerror();
3566        }
3567    }
3568    ATOMIC_SIZE_ADD(malloc_increase, size);
3569
3570#if CALC_EXACT_MALLOC_SIZE
3571    ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size - oldsize);
3572    ((size_t *)mem)[0] = size;
3573    mem = (size_t *)mem + 1;
3574#endif
3575
3576    return mem;
3577}
3578
3579static void
3580vm_xfree(rb_objspace_t *objspace, void *ptr)
3581{
3582#if CALC_EXACT_MALLOC_SIZE
3583    size_t size;
3584    ptr = ((size_t *)ptr) - 1;
3585    size = ((size_t*)ptr)[0];
3586    if (size) {
3587	ATOMIC_SIZE_SUB(objspace->malloc_params.allocated_size, size);
3588	ATOMIC_SIZE_DEC(objspace->malloc_params.allocations);
3589    }
3590#endif
3591
3592    free(ptr);
3593}
3594
3595void *
3596ruby_xmalloc(size_t size)
3597{
3598    return vm_xmalloc(&rb_objspace, size);
3599}
3600
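/*
 * Compute n * size for array-style allocations, raising ArgumentError when
 * the multiplication overflows (detected by checking that dividing the
 * product by n does not give size back).
 */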
3601static inline size_t
3602xmalloc2_size(size_t n, size_t size)
3603{
3604    size_t len = size * n;
3605    if (n != 0 && size != len / n) {
3606	rb_raise(rb_eArgError, "malloc: possible integer overflow");
3607    }
3608    return len;
3609}
3610
3611void *
3612ruby_xmalloc2(size_t n, size_t size)
3613{
3614    return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
3615}
3616
3617static void *
3618vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
3619{
3620    void *mem;
3621    size_t size;
3622
3623    size = xmalloc2_size(count, elsize);
3624    size = vm_malloc_prepare(objspace, size);
3625
3626    TRY_WITH_GC(mem = calloc(1, size));
3627    return vm_malloc_fixup(objspace, mem, size);
3628}
3629
3630void *
3631ruby_xcalloc(size_t n, size_t size)
3632{
3633    return vm_xcalloc(&rb_objspace, n, size);
3634}
3635
3636void *
3637ruby_xrealloc(void *ptr, size_t size)
3638{
3639    return vm_xrealloc(&rb_objspace, ptr, size);
3640}
3641
3642void *
3643ruby_xrealloc2(void *ptr, size_t n, size_t size)
3644{
3645    size_t len = size * n;
3646    if (n != 0 && size != len / n) {
3647	rb_raise(rb_eArgError, "realloc: possible integer overflow");
3648    }
3649    return ruby_xrealloc(ptr, len);
3650}
3651
3652void
3653ruby_xfree(void *x)
3654{
3655    if (x)
3656	vm_xfree(&rb_objspace, x);
3657}
3658
3659
/* Mimics ruby_xmalloc, but does not need rb_objspace;
 * returns a pointer suitable for ruby_xfree.
 */
3663void *
3664ruby_mimmalloc(size_t size)
3665{
3666    void *mem;
3667#if CALC_EXACT_MALLOC_SIZE
3668    size += sizeof(size_t);
3669#endif
3670    mem = malloc(size);
3671#if CALC_EXACT_MALLOC_SIZE
3672    /* set 0 for consistency of allocated_size/allocations */
3673    ((size_t *)mem)[0] = 0;
3674    mem = (size_t *)mem + 1;
3675#endif
3676    return mem;
3677}
3678
3679#if CALC_EXACT_MALLOC_SIZE
3680/*
3681 *  call-seq:
3682 *     GC.malloc_allocated_size -> Integer
3683 *
3684 *  Returns the size of memory allocated by malloc().
3685 *
3686 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
3687 */
3688
3689static VALUE
3690gc_malloc_allocated_size(VALUE self)
3691{
3692    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
3693}
3694
3695/*
3696 *  call-seq:
3697 *     GC.malloc_allocations -> Integer
3698 *
3699 *  Returns the number of malloc() allocations.
3700 *
3701 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
3702 */
3703
3704static VALUE
3705gc_malloc_allocations(VALUE self)
3706{
3707    return UINT2NUM(rb_objspace.malloc_params.allocations);
3708}
3709#endif
3710
3711/*
3712  ------------------------------ WeakMap ------------------------------
3713*/
3714
3715struct weakmap {
3716    st_table *obj2wmap;		/* obj -> [ref,...] */
3717    st_table *wmap2obj;		/* ref -> obj */
3718    VALUE final;
3719};
3720
3721static int
3722wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
3723{
3724    gc_mark_ptr((rb_objspace_t *)arg, (VALUE)val);
3725    return ST_CONTINUE;
3726}
3727
3728static void
3729wmap_mark(void *ptr)
3730{
3731    struct weakmap *w = ptr;
3732    st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
3733    rb_gc_mark(w->final);
3734}
3735
3736static int
3737wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
3738{
3739    rb_ary_resize((VALUE)val, 0);
3740    return ST_CONTINUE;
3741}
3742
3743static void
3744wmap_free(void *ptr)
3745{
3746    struct weakmap *w = ptr;
3747    st_foreach(w->obj2wmap, wmap_free_map, 0);
3748    st_free_table(w->obj2wmap);
3749    st_free_table(w->wmap2obj);
3750}
3751
3752size_t rb_ary_memsize(VALUE ary);
3753static int
3754wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
3755{
3756    *(size_t *)arg += rb_ary_memsize((VALUE)val);
3757    return ST_CONTINUE;
3758}
3759
3760static size_t
3761wmap_memsize(const void *ptr)
3762{
3763    size_t size;
3764    const struct weakmap *w = ptr;
3765    if (!w) return 0;
3766    size = sizeof(*w);
3767    size += st_memsize(w->obj2wmap);
3768    size += st_memsize(w->wmap2obj);
3769    st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
3770    return size;
3771}
3772
3773static const rb_data_type_t weakmap_type = {
3774    "weakmap",
3775    {
3776	wmap_mark,
3777	wmap_free,
3778	wmap_memsize,
3779    }
3780};
3781
3782static VALUE
3783wmap_allocate(VALUE klass)
3784{
3785    struct weakmap *w;
3786    VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
3787    w->obj2wmap = st_init_numtable();
3788    w->wmap2obj = st_init_numtable();
3789    w->final = rb_obj_method(obj, ID2SYM(rb_intern("finalize")));
3790    return obj;
3791}
3792
3793static int
3794wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
3795{
3796    VALUE wmap, ary;
3797    if (!existing) return ST_STOP;
3798    wmap = (VALUE)arg, ary = (VALUE)*value;
3799    rb_ary_delete_same(ary, wmap);
3800    if (!RARRAY_LEN(ary)) return ST_DELETE;
3801    return ST_CONTINUE;
3802}
3803
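/*
 * Finalizer attached to both keys and values by wmap_aset(): when either
 * side is collected, drop every mapping that involved it from obj2wmap and
 * wmap2obj so the tables do not accumulate stale object ids.
 */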
3804static VALUE
3805wmap_finalize(VALUE self, VALUE objid)
3806{
3807    st_data_t orig, wmap, data;
3808    VALUE obj, rids;
3809    long i;
3810    struct weakmap *w;
3811
3812    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
3813    /* Get reference from object id. */
3814    obj = obj_id_to_ref(objid);
3815
3816    /* obj is original referenced object and/or weak reference. */
3817    orig = (st_data_t)obj;
3818    if (st_delete(w->obj2wmap, &orig, &data)) {
3819	rids = (VALUE)data;
3820	for (i = 0; i < RARRAY_LEN(rids); ++i) {
3821	    wmap = (st_data_t)RARRAY_PTR(rids)[i];
3822	    st_delete(w->wmap2obj, &wmap, NULL);
3823	}
3824    }
3825
3826    wmap = (st_data_t)obj;
3827    if (st_delete(w->wmap2obj, &wmap, &orig)) {
3828	wmap = (st_data_t)obj;
3829	st_update(w->obj2wmap, orig, wmap_final_func, wmap);
3830    }
3831    return self;
3832}
3833
3834/* Creates a weak reference from the given key to the given value */
3835static VALUE
3836wmap_aset(VALUE self, VALUE wmap, VALUE orig)
3837{
3838    st_data_t data;
3839    VALUE rids;
3840    struct weakmap *w;
3841
3842    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
3843    rb_define_final(orig, w->final);
3844    rb_define_final(wmap, w->final);
3845    if (st_lookup(w->obj2wmap, (st_data_t)orig, &data)) {
3846	rids = (VALUE)data;
3847    }
3848    else {
3849	rids = rb_ary_tmp_new(1);
3850	st_insert(w->obj2wmap, (st_data_t)orig, (st_data_t)rids);
3851    }
3852    rb_ary_push(rids, wmap);
3853    st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
3854    return nonspecial_obj_id(orig);
3855}
3856
3857/* Retrieves a weakly referenced object with the given key */
3858static VALUE
3859wmap_aref(VALUE self, VALUE wmap)
3860{
3861    st_data_t data;
3862    VALUE obj;
3863    struct weakmap *w;
3864    rb_objspace_t *objspace = &rb_objspace;
3865
3866    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
3867    if (!st_lookup(w->wmap2obj, (st_data_t)wmap, &data)) return Qnil;
3868    obj = (VALUE)data;
3869    if (!is_id_value(objspace, obj)) return Qnil;
3870    if (!is_live_object(objspace, obj)) return Qnil;
3871    return obj;
3872}
3873
3874
3875/*
3876  ------------------------------ GC profiler ------------------------------
3877*/
3878
3879static inline void gc_prof_set_heap_info(rb_objspace_t *, gc_profile_record *);
3880#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
3881
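/*
 * Per-process CPU time used by the profiler, tried in order:
 * clock_gettime(CLOCK_PROCESS_CPUTIME_ID), getrusage(RUSAGE_SELF) user
 * time, then GetProcessTimes() on Windows; 0.0 if none is available.
 */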
3882static double
3883getrusage_time(void)
3884{
3885#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
3886    {
3887        static int try_clock_gettime = 1;
3888        struct timespec ts;
3889        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
3890            return ts.tv_sec + ts.tv_nsec * 1e-9;
3891        }
3892        else {
3893            try_clock_gettime = 0;
3894        }
3895    }
3896#endif
3897
3898#ifdef RUSAGE_SELF
3899    {
3900        struct rusage usage;
3901        struct timeval time;
3902        if (getrusage(RUSAGE_SELF, &usage) == 0) {
3903            time = usage.ru_utime;
3904            return time.tv_sec + time.tv_usec * 1e-6;
3905        }
3906    }
3907#endif
3908
3909#ifdef _WIN32
3910    {
3911        FILETIME creation_time, exit_time, kernel_time, user_time;
3912        ULARGE_INTEGER ui;
3913        LONG_LONG q;
3914        double t;
3915
3916        if (GetProcessTimes(GetCurrentProcess(),
3917                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
3918            memcpy(&ui, &user_time, sizeof(FILETIME));
3919            q = ui.QuadPart / 10L;
3920            t = (DWORD)(q % 1000000L) * 1e-6;
3921            q /= 1000000L;
3922#ifdef __GNUC__
3923            t += q;
3924#else
3925            t += (double)(DWORD)(q >> 16) * (1 << 16);
3926            t += (DWORD)q & ~(~0 << 16);
3927#endif
3928            return t;
3929        }
3930    }
3931#endif
3932
3933    return 0.0;
3934}
3935
3936static inline void
3937gc_prof_timer_start(rb_objspace_t *objspace)
3938{
3939    if (objspace->profile.run) {
3940        size_t count = objspace->profile.count;
3941
3942        if (!objspace->profile.record) {
3943            objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
3944            objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);
3945        }
3946        if (count >= objspace->profile.size) {
3947            objspace->profile.size += 1000;
3948            objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);
3949        }
3950        if (!objspace->profile.record) {
3951            rb_bug("gc_profile malloc or realloc miss");
3952        }
3953        MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);
3954        objspace->profile.record[count].gc_time = getrusage_time();
        objspace->profile.record[count].gc_invoke_time =
3956            objspace->profile.record[count].gc_time - objspace->profile.invoke_time;
3957    }
3958}
3959
3960static inline void
3961gc_prof_timer_stop(rb_objspace_t *objspace, int marked)
3962{
3963    if (objspace->profile.run) {
3964        double gc_time = 0;
3965        size_t count = objspace->profile.count;
3966        gc_profile_record *record = &objspace->profile.record[count];
3967
3968        gc_time = getrusage_time() - record->gc_time;
3969        if (gc_time < 0) gc_time = 0;
3970        record->gc_time = gc_time;
3971        record->is_marked = !!(marked);
3972        gc_prof_set_heap_info(objspace, record);
3973        objspace->profile.count++;
3974    }
3975}
3976
3977#if !GC_PROFILE_MORE_DETAIL
3978
3979static inline void
3980gc_prof_mark_timer_start(rb_objspace_t *objspace)
3981{
3982    if (RUBY_DTRACE_GC_MARK_BEGIN_ENABLED()) {
3983	RUBY_DTRACE_GC_MARK_BEGIN();
3984    }
3985}
3986
3987static inline void
3988gc_prof_mark_timer_stop(rb_objspace_t *objspace)
3989{
3990    if (RUBY_DTRACE_GC_MARK_END_ENABLED()) {
3991	RUBY_DTRACE_GC_MARK_END();
3992    }
3993}
3994
3995static inline void
3996gc_prof_sweep_timer_start(rb_objspace_t *objspace)
3997{
3998    if (RUBY_DTRACE_GC_SWEEP_BEGIN_ENABLED()) {
3999	RUBY_DTRACE_GC_SWEEP_BEGIN();
4000    }
4001}
4002
4003static inline void
4004gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
4005{
4006    if (RUBY_DTRACE_GC_SWEEP_END_ENABLED()) {
4007	RUBY_DTRACE_GC_SWEEP_END();
4008    }
4009}
4010
4011static inline void
4012gc_prof_set_malloc_info(rb_objspace_t *objspace)
4013{
4014}
4015
4016static inline void
4017gc_prof_set_heap_info(rb_objspace_t *objspace, gc_profile_record *record)
4018{
4019    size_t live = objspace_live_num(objspace);
4020    size_t total = heaps_used * HEAP_OBJ_LIMIT;
4021
4022    record->heap_total_objects = total;
4023    record->heap_use_size = live * sizeof(RVALUE);
4024    record->heap_total_size = total * sizeof(RVALUE);
4025}
4026
4027#else
4028
4029static inline void
4030gc_prof_mark_timer_start(rb_objspace_t *objspace)
4031{
4032    if (RUBY_DTRACE_GC_MARK_BEGIN_ENABLED()) {
4033	RUBY_DTRACE_GC_MARK_BEGIN();
4034    }
4035    if (objspace->profile.run) {
4036        size_t count = objspace->profile.count;
4037
4038        objspace->profile.record[count].gc_mark_time = getrusage_time();
4039    }
4040}
4041
4042static inline void
4043gc_prof_mark_timer_stop(rb_objspace_t *objspace)
4044{
4045    if (RUBY_DTRACE_GC_MARK_END_ENABLED()) {
4046	RUBY_DTRACE_GC_MARK_END();
4047    }
4048    if (objspace->profile.run) {
4049        double mark_time = 0;
4050        size_t count = objspace->profile.count;
4051        gc_profile_record *record = &objspace->profile.record[count];
4052
4053        mark_time = getrusage_time() - record->gc_mark_time;
4054        if (mark_time < 0) mark_time = 0;
4055        record->gc_mark_time = mark_time;
4056    }
4057}
4058
4059static inline void
4060gc_prof_sweep_timer_start(rb_objspace_t *objspace)
4061{
4062    if (RUBY_DTRACE_GC_SWEEP_BEGIN_ENABLED()) {
4063	RUBY_DTRACE_GC_SWEEP_BEGIN();
4064    }
4065    if (objspace->profile.run) {
4066        size_t count = objspace->profile.count;
4067
4068        objspace->profile.record[count].gc_sweep_time = getrusage_time();
4069    }
4070}
4071
4072static inline void
4073gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
4074{
4075    if (RUBY_DTRACE_GC_SWEEP_END_ENABLED()) {
4076	RUBY_DTRACE_GC_SWEEP_END();
4077    }
4078    if (objspace->profile.run) {
4079        double sweep_time = 0;
4080        size_t count = objspace->profile.count;
4081        gc_profile_record *record = &objspace->profile.record[count];
4082
        sweep_time = getrusage_time() - record->gc_sweep_time;
        if (sweep_time < 0) sweep_time = 0;
4085        record->gc_sweep_time = sweep_time;
4086    }
4087}
4088
4089static inline void
4090gc_prof_set_malloc_info(rb_objspace_t *objspace)
4091{
4092    if (objspace->profile.run) {
4093        gc_profile_record *record = &objspace->profile.record[objspace->profile.count];
4094        if (record) {
4095            record->allocate_increase = malloc_increase;
4096            record->allocate_limit = malloc_limit;
4097        }
4098    }
4099}
4100
4101static inline void
4102gc_prof_set_heap_info(rb_objspace_t *objspace, gc_profile_record *record)
4103{
4104    size_t live = objspace_live_num(objspace);
4105    size_t total = heaps_used * HEAP_OBJ_LIMIT;
4106
4107    record->heap_use_slots = heaps_used;
4108    record->heap_live_objects = live;
4109    record->heap_free_objects = total - live;
4110    record->heap_total_objects = total;
4111    record->have_finalize = deferred_final_list ? Qtrue : Qfalse;
4112    record->heap_use_size = live * sizeof(RVALUE);
4113    record->heap_total_size = total * sizeof(RVALUE);
4114}
4115
4116#endif /* !GC_PROFILE_MORE_DETAIL */
4117
4118
/*
 *  call-seq:
 *    GC::Profiler.clear          -> nil
 *
 *  Clears the GC profiler data.
 *
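 *  For example (a minimal sketch; the exact number of recorded runs depends
 *  on how often the GC has fired since the profiler was enabled):
 *
 *    GC::Profiler.enable
 *    GC.start
 *    GC::Profiler.raw_data.size   #=> 1 (may be higher)
 *    GC::Profiler.clear
 *    GC::Profiler.raw_data        #=> []
 *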
 */

static VALUE
gc_profile_clear(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (GC_PROFILE_RECORD_DEFAULT_SIZE * 2 < objspace->profile.size) {
        objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE * 2;
        objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);
        if (!objspace->profile.record) {
            rb_memerror();
        }
    }
    MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
    objspace->profile.count = 0;
    return Qnil;
}

/*
 *  call-seq:
 *     GC::Profiler.raw_data	-> [Hash, ...]
 *
 *  Returns an Array of individual raw profile data Hashes ordered
 *  from earliest to latest by +:GC_INVOKE_TIME+.
 *
 *  For example:
 *
 *    [
 *	{
 *	   :GC_TIME=>1.3000000000000858e-05,
 *	   :GC_INVOKE_TIME=>0.010634999999999999,
 *	   :HEAP_USE_SIZE=>289640,
 *	   :HEAP_TOTAL_SIZE=>588960,
 *	   :HEAP_TOTAL_OBJECTS=>14724,
 *	   :GC_IS_MARKED=>false
 *	},
 *      # ...
 *    ]
 *
 *  The keys mean:
 *
 *  +:GC_TIME+::
 *	Time elapsed in seconds for this GC run
 *  +:GC_INVOKE_TIME+::
 *	Time elapsed in seconds from startup to when the GC was invoked
 *  +:HEAP_USE_SIZE+::
 *	Total bytes of heap used
 *  +:HEAP_TOTAL_SIZE+::
 *	Total size of heap in bytes
 *  +:HEAP_TOTAL_OBJECTS+::
 *	Total number of objects
 *  +:GC_IS_MARKED+::
 *	+true+ if this GC run included a mark phase
 *
 *  If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
 *  to the following hash keys:
 *
 *  +:GC_MARK_TIME+::
 *	Time spent in the mark phase of this GC run, in seconds
 *  +:GC_SWEEP_TIME+::
 *	Time spent in the sweep phase of this GC run, in seconds
 *  +:ALLOCATE_INCREASE+::
 *	Bytes allocated with malloc since the previous GC
 *  +:ALLOCATE_LIMIT+::
 *	Malloc limit, in bytes, that triggers a GC when exceeded
 *  +:HEAP_USE_SLOTS+::
 *	Number of heap slots in use
 *  +:HEAP_LIVE_OBJECTS+::
 *	Number of live objects
 *  +:HEAP_FREE_OBJECTS+::
 *	Number of free object slots
 *  +:HAVE_FINALIZE+::
 *	+true+ if deferred finalizers were pending for this GC run
 *
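 *  For example, the total GC time can be computed from the raw data
 *  (a minimal sketch; assumes the profiler is enabled):
 *
 *    GC::Profiler.raw_data.inject(0.0) {|sum, rec| sum + rec[:GC_TIME] }
 *
 *  which is equivalent to GC::Profiler.total_time.
 *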
 */

static VALUE
gc_profile_record_get(void)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = (&rb_objspace);

    if (!objspace->profile.run) {
	return Qnil;
    }

    for (i = 0; i < objspace->profile.count; i++) {
	prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_total_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), objspace->profile.record[i].is_marked ? Qtrue : Qfalse);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(objspace->profile.record[i].allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(objspace->profile.record[i].allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), SIZET2NUM(objspace->profile.record[i].heap_use_slots));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_free_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize ? Qtrue : Qfalse);
#endif
	rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}

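/*
 * Formats the collected profile records onto +out+ via +append+:
 * rb_str_buf_append for GC::Profiler.result, rb_io_write for
 * GC::Profiler.report.
 */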
static void
gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t count = objspace->profile.count;

    if (objspace->profile.run && count) {
	int index = 1;
	size_t i;
	gc_profile_record r;
	append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->count));
	append(out, rb_str_new_cstr("Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n"));
	for (i = 0; i < count; i++) {
	    r = objspace->profile.record[i];
#if !GC_PROFILE_MORE_DETAIL
            if (r.is_marked) {
#endif
		append(out, rb_sprintf("%5d %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
			index++, r.gc_invoke_time, r.heap_use_size,
			r.heap_total_size, r.heap_total_objects, r.gc_time*1000));
#if !GC_PROFILE_MORE_DETAIL
            }
#endif
	}
#if GC_PROFILE_MORE_DETAIL
	append(out, rb_str_new_cstr("\n\n" \
		"More detail.\n" \
		"Index Allocate Increase    Allocate Limit  Use Slot  Have Finalize             Mark Time(ms)            Sweep Time(ms)\n"));
        index = 1;
	for (i = 0; i < count; i++) {
	    r = objspace->profile.record[i];
	    append(out, rb_sprintf("%5d %17"PRIuSIZE" %17"PRIuSIZE" %9"PRIuSIZE" %14s %25.20f %25.20f\n",
			index++, r.allocate_increase, r.allocate_limit,
			r.heap_use_slots, (r.have_finalize ? "true" : "false"),
			r.gc_mark_time*1000, r.gc_sweep_time*1000));
	}
#endif
    }
}

/*
 *  call-seq:
 *     GC::Profiler.result  -> String
 *
 *  Returns a profile data report such as:
 *
 *    GC 1 invokes.
 *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)
 *        1               0.012               159240               212940                10647         0.00000000000001530000
 */

static VALUE
gc_profile_result(void)
{
    VALUE str = rb_str_buf_new(0);
    gc_profile_dump_on(str, rb_str_buf_append);
    return str;
}

/*
 *  call-seq:
 *     GC::Profiler.report
 *     GC::Profiler.report(io)
 *
 *  Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
 *
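 *  For example (a minimal sketch; assumes the profiler is enabled, at least
 *  one GC has run, and a file named "gc_profile.log" may be created):
 *
 *     GC::Profiler.report                 # report to $stdout
 *
 *     File.open("gc_profile.log", "w") do |io|
 *       GC::Profiler.report(io)           # report to the given IO object
 *     end
 *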
 */

static VALUE
gc_profile_report(int argc, VALUE *argv, VALUE self)
{
    VALUE out;

    if (argc == 0) {
	out = rb_stdout;
    }
    else {
	rb_scan_args(argc, argv, "01", &out);
    }
    gc_profile_dump_on(out, rb_io_write);

    return Qnil;
}

/*
 *  call-seq:
 *     GC::Profiler.total_time	-> float
 *
 *  The total time used for garbage collection in seconds.
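 *
 *  For example (a minimal sketch; the reported value depends on the workload):
 *
 *     GC::Profiler.enable
 *     GC.start
 *     GC::Profiler.total_time   #=> 3.0e-05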
 */

static VALUE
gc_profile_total_time(VALUE self)
{
    double time = 0;
    rb_objspace_t *objspace = &rb_objspace;
    size_t i;

    if (objspace->profile.run && objspace->profile.count) {
	for (i = 0; i < objspace->profile.count; i++) {
	    time += objspace->profile.record[i].gc_time;
	}
    }
    return DBL2NUM(time);
}

/*
 *  call-seq:
 *    GC::Profiler.enabled?	-> true or false
 *
 *  The current status of GC profile mode.
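 *
 *  For example (a minimal sketch; the profiler is disabled by default):
 *
 *     GC::Profiler.enabled?   #=> false
 *     GC::Profiler.enable
 *     GC::Profiler.enabled?   #=> true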
 */

static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return objspace->profile.run ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *    GC::Profiler.enable	-> nil
 *
 *  Starts the GC profiler.
 *
 */

static VALUE
gc_profile_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = TRUE;
    return Qnil;
}

/*
 *  call-seq:
 *    GC::Profiler.disable	-> nil
 *
 *  Stops the GC profiler.
 *
 */

static VALUE
gc_profile_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = FALSE;
    return Qnil;
}

#ifdef GC_DEBUG

/*
  ------------------------------ DEBUG ------------------------------
*/

void
rb_gcdebug_print_obj_condition(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
    }
    else {
        fprintf(stderr, "pointer to heap?: false\n");
        return;
    }
    fprintf(stderr, "marked?: %s\n",
            MARKED_IN_BITMAP(GET_HEAP_BITMAP(obj), obj) ? "true" : "false");
    if (is_lazy_sweeping(objspace)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n",
                is_swept_object(objspace, obj) ? "done" : "not yet");
    }
    else {
        fprintf(stderr, "lazy sweeping?: false\n");
    }
}

static VALUE
gcdebug_sentinel(VALUE obj, VALUE name)
{
    fprintf(stderr, "WARNING: object %s(%p) was inadvertently collected\n", (char *)name, (void *)obj);
    return Qnil;
}

void
rb_gcdebug_sentinel(VALUE obj, const char *name)
{
    rb_define_final(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
}
#endif /* GC_DEBUG */


/*
 * Document-class: ObjectSpace
 *
 *  The ObjectSpace module contains a number of routines
 *  that interact with the garbage collection facility and allow you to
 *  traverse all living objects with an iterator.
 *
 *  ObjectSpace also provides support for object finalizers, procs that will be
 *  called when a specific object is about to be destroyed by garbage
 *  collection.
 *
 *     include ObjectSpace
 *
 *     a = "A"
 *     b = "B"
 *     c = "C"
 *
 *     define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
 *     define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
 *     define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
 *
 *  _produces:_
 *
 *     Finalizer three on 537763470
 *     Finalizer one on 537763480
 *     Finalizer two on 537763480
 *
 */

/*
 *  Document-class: ObjectSpace::WeakMap
 *
 *  An ObjectSpace::WeakMap object holds references to
 *  any objects, but those objects can still be garbage collected.
 *
 *  This class is mostly used internally by WeakRef; use
 *  +lib/weakref.rb+ for the public interface.
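 *
 *  For example (a minimal sketch; whether an entry is still present depends on
 *  when the GC collects the key and value objects):
 *
 *     map = ObjectSpace::WeakMap.new
 *     key = Object.new
 *     map[key] = "payload"
 *     map[key]   #=> "payload", while the value object is still alive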
 */

/*  Document-class: GC::Profiler
 *
 *  The GC profiler provides access to information on GC runs including time,
 *  length and object space size.
 *
 *  Example:
 *
 *    GC::Profiler.enable
 *
 *    require 'rdoc/rdoc'
 *
 *    GC::Profiler.report
 *
 *    GC::Profiler.disable
 *
 *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
 */

/*
 *  The GC module provides an interface to Ruby's mark and
 *  sweep garbage collection mechanism.
 *
 *  Some of the underlying methods are also available via the ObjectSpace
 *  module.
 *
 *  You may obtain information about the operation of the GC through
 *  GC::Profiler.
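 *
 *  For example (a minimal sketch; the numbers vary from run to run):
 *
 *     GC.count          #=> 4    number of GC runs so far
 *     GC.start                 # force a garbage collection run
 *     GC.stat[:count]   #=> 5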
 */

void
Init_GC(void)
{
    VALUE rb_mObSpace;
    VALUE rb_mProfiler;

    rb_mGC = rb_define_module("GC");
    rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
    rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
    rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
    rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
    rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
    rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
    rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
    rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);

    rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
    rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
    rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
    rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
    rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
    rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
    rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
    rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
    rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);

    rb_mObSpace = rb_define_module("ObjectSpace");
    rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
    rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);

    rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);

    nomem_error = rb_exc_new3(rb_eNoMemError,
			      rb_obj_freeze(rb_str_new2("failed to allocate memory")));
    OBJ_TAINT(nomem_error);
    OBJ_FREEZE(nomem_error);

    rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);

    {
	VALUE rb_cWeakMap = rb_define_class_under(rb_mObSpace, "WeakMap", rb_cObject);
	rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
	rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
	rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
	rb_define_private_method(rb_cWeakMap, "finalize", wmap_finalize, 1);
    }

#if CALC_EXACT_MALLOC_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif
}