1/**********************************************************************
2
3  cont.c -
4
5  $Author: nagachika $
6  created at: Thu May 23 09:03:43 2007
7
8  Copyright (C) 2007 Koichi Sasada
9
10**********************************************************************/
11
12#include "ruby/ruby.h"
13#include "internal.h"
14#include "vm_core.h"
15#include "gc.h"
16#include "eval_intern.h"
17
/* FIBER_USE_NATIVE enables a Fiber performance improvement using
 * system-dependent methods such as make/setcontext on POSIX systems or
 * the CreateFiber() API on Windows.
 * This hack makes Fiber context switching faster (x2 or more).
 * However, it decreases the maximum number of Fibers.  For example, on a
 * 32bit POSIX OS, only ten or twenty thousand Fibers can be created.
 *
 * Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
 * in Proc. of the 51st Programming Symposium, pp.21--28 (2010) (in Japanese).
 */
28
29#if !defined(FIBER_USE_NATIVE)
30# if defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT)
31#   if 0
32#   elif defined(__NetBSD__)
33/* On our experience, NetBSD doesn't support using setcontext() and pthread
34 * simultaneously.  This is because pthread_self(), TLS and other information
35 * are represented by stack pointer (higher bits of stack pointer).
36 * TODO: check such constraint on configure.
37 */
38#     define FIBER_USE_NATIVE 0
39#   elif defined(__sun)
/* Disabled on Solaris because resuming any Fiber caused a SEGV, for some
 * reason.
 */
42#     define FIBER_USE_NATIVE 0
43#   elif defined(__ia64)
44/* At least, Linux/ia64's getcontext(3) doesn't save register window.
45 */
46#     define FIBER_USE_NATIVE 0
47#   elif defined(__GNU__)
48/* GNU/Hurd doesn't fully support getcontext, setcontext, makecontext
49 * and swapcontext functions. Disabling their usage till support is
50 * implemented. More info at
51 * http://darnassus.sceen.net/~hurd-web/open_issues/glibc/#getcontext
52 */
53#     define FIBER_USE_NATIVE 0
54#   else
55#     define FIBER_USE_NATIVE 1
56#   endif
57# elif defined(_WIN32)
58#   if _WIN32_WINNT >= 0x0400
/* only when _WIN32_WINNT >= 0x0400 on Windows, because the Fiber APIs are
 * supported only in such build (and runtime) environments.
 * [ruby-dev:41192]
 */
63#     define FIBER_USE_NATIVE 1
64#   endif
65# endif
66#endif
67#if !defined(FIBER_USE_NATIVE)
68#define FIBER_USE_NATIVE 0
69#endif
70
#if FIBER_USE_NATIVE
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#include <ucontext.h>
#endif
/* RB_PAGE_MASK rounds an address down to its page boundary; this assumes
 * pagesize is a power of two.  pagesize is presumably set during VM/Fiber
 * initialization — that code is outside this chunk. */
#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
static long pagesize;
#endif /*FIBER_USE_NATIVE*/
81
82#define CAPTURE_JUST_VALID_VM_STACK 1
83
/* Discriminates what a captured rb_context_t represents. */
enum context_type {
    CONTINUATION_CONTEXT = 0,	/* Continuation object (callcc) */
    FIBER_CONTEXT = 1,		/* ordinary Fiber */
    ROOT_FIBER_CONTEXT = 2	/* the per-thread root fiber */
};
89
/* Captured execution state shared by Continuation and Fiber.
 * rb_fiber_t embeds an rb_context_t as its FIRST member, so this file
 * freely casts between the two (see cont_mark/cont_free). */
typedef struct rb_context_struct {
    enum context_type type;
    VALUE self;			/* the wrapping Continuation/Fiber object */
    int argc;			/* args passed on call/resume; -1 means "value is an exception to raise" */
    VALUE value;		/* value transferred back to the capture point */
    VALUE *vm_stack;		/* heap copy of the VM stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;  /* length of stack (head of th->stack) */
    size_t vm_stack_clen;  /* length of control frames (tail of th->stack) */
#endif
    VALUE *machine_stack;	/* heap copy of the machine (C) stack */
    VALUE *machine_stack_src;	/* address the machine stack was copied from */
#ifdef __ia64
    VALUE *machine_register_stack;	/* ia64 register backing store copy */
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;	/* snapshot of the owning thread's state */
    rb_jmpbuf_t jmpbuf;		/* jump target used to re-enter the capture point */
    size_t machine_stack_size;
} rb_context_t;
111
/* Fiber life-cycle: CREATED until first switched to, RUNNING once its
 * machine context exists, TERMINATED when finished (see rb_fiber_terminate). */
enum fiber_status {
    CREATED,
    RUNNING,
    TERMINATED
};
117
#if FIBER_USE_NATIVE && !defined(_WIN32)
/* Cache of machine-stack mappings released by terminated fibers, reused
 * to avoid repeated mmap/munmap.  NOTE: "MAHINE" is a historical typo;
 * the name is kept as-is because it may be referenced elsewhere in the
 * file beyond this chunk. */
#define MAX_MAHINE_STACK_CACHE  10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
    void *ptr;		/* base address of the cached mapping */
    size_t size;	/* recorded in VALUE units — see the (size / sizeof(VALUE)) comparison in fiber_machine_stack_alloc */
} machine_stack_cache_t;
static machine_stack_cache_t machine_stack_cache[MAX_MAHINE_STACK_CACHE];
static machine_stack_cache_t terminated_machine_stack;
#endif
128
/* A Fiber: captured context plus scheduling links. */
typedef struct rb_fiber_struct {
    rb_context_t cont;		/* must stay first: rb_context_t* is cast to rb_fiber_t* */
    VALUE prev;			/* fiber to yield back to (Qnil if none) */
    enum fiber_status status;
    struct rb_fiber_struct *prev_fiber;	/* circular doubly-linked list of all */
    struct rb_fiber_struct *next_fiber;	/* fibers (see fiber_link_join/remove) */
    /* If a fiber invokes "transfer",
     * then this fiber can't "resume" any more after that.
     * You shouldn't mix "transfer" and "resume".
     */
    int transfered;

#if FIBER_USE_NATIVE
#ifdef _WIN32
    void *fib_handle;		/* Win32 fiber handle from CreateFiberEx() */
#else
    ucontext_t context;		/* POSIX context used by swapcontext() */
#endif
#endif
} rb_fiber_t;
149
static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

/* Extract the rb_context_t* wrapped by a Continuation object. */
#define GetContPtr(obj, ptr)  \
    TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))

/* Extract the rb_fiber_t* wrapped by a Fiber object; raises FiberError
 * for an allocated-but-uninitialized Fiber (DATA_PTR still NULL). */
#define GetFiberPtr(obj, ptr)  do {\
    TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
    if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while (0)

NOINLINE(static VALUE cont_capture(volatile int *stat));

/* A thread with no active tag (jump target) cannot capture or run fibers. */
#define THREAD_MUST_BE_RUNNING(th) do { \
	if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread");	\
    } while (0)
168
169static void
170cont_mark(void *ptr)
171{
172    RUBY_MARK_ENTER("cont");
173    if (ptr) {
174	rb_context_t *cont = ptr;
175	rb_gc_mark(cont->value);
176	rb_thread_mark(&cont->saved_thread);
177	rb_gc_mark(cont->saved_thread.self);
178
179	if (cont->vm_stack) {
180#ifdef CAPTURE_JUST_VALID_VM_STACK
181	    rb_gc_mark_locations(cont->vm_stack,
182				 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
183#else
184	    rb_gc_mark_localtion(cont->vm_stack,
185				 cont->vm_stack, cont->saved_thread.stack_size);
186#endif
187	}
188
189	if (cont->machine_stack) {
190	    if (cont->type == CONTINUATION_CONTEXT) {
191		/* cont */
192		rb_gc_mark_locations(cont->machine_stack,
193				     cont->machine_stack + cont->machine_stack_size);
194            }
195            else {
196		/* fiber */
197		rb_thread_t *th;
198                rb_fiber_t *fib = (rb_fiber_t*)cont;
199		GetThreadPtr(cont->saved_thread.self, th);
200		if ((th->fiber != cont->self) && fib->status == RUNNING) {
201		    rb_gc_mark_locations(cont->machine_stack,
202					 cont->machine_stack + cont->machine_stack_size);
203		}
204	    }
205	}
206#ifdef __ia64
207	if (cont->machine_register_stack) {
208	    rb_gc_mark_locations(cont->machine_register_stack,
209				 cont->machine_register_stack + cont->machine_register_stack_size);
210	}
211#endif
212    }
213    RUBY_MARK_LEAVE("cont");
214}
215
216static void
217cont_free(void *ptr)
218{
219    RUBY_FREE_ENTER("cont");
220    if (ptr) {
221	rb_context_t *cont = ptr;
222	RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout);
223#if FIBER_USE_NATIVE
224	if (cont->type == CONTINUATION_CONTEXT) {
225	    /* cont */
226	    RUBY_FREE_UNLESS_NULL(cont->machine_stack);
227	}
228	else {
229	    /* fiber */
230#ifdef _WIN32
231	    if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
232		/* don't delete root fiber handle */
233		rb_fiber_t *fib = (rb_fiber_t*)cont;
234		if (fib->fib_handle) {
235		    DeleteFiber(fib->fib_handle);
236		}
237	    }
238#else /* not WIN32 */
239	    if (GET_THREAD()->fiber != cont->self) {
240                rb_fiber_t *fib = (rb_fiber_t*)cont;
241                if (fib->context.uc_stack.ss_sp) {
242                    if (cont->type == ROOT_FIBER_CONTEXT) {
243			rb_bug("Illegal root fiber parameter");
244                    }
245		    munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size);
246		}
247	    }
248            else {
249		/* It may reached here when finalize */
250		/* TODO examine whether it is a bug */
251                /* rb_bug("cont_free: release self"); */
252            }
253#endif
254	}
255#else /* not FIBER_USE_NATIVE */
256	RUBY_FREE_UNLESS_NULL(cont->machine_stack);
257#endif
258#ifdef __ia64
259	RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
260#endif
261	RUBY_FREE_UNLESS_NULL(cont->vm_stack);
262
263	/* free rb_cont_t or rb_fiber_t */
264	ruby_xfree(ptr);
265    }
266    RUBY_FREE_LEAVE("cont");
267}
268
269static size_t
270cont_memsize(const void *ptr)
271{
272    const rb_context_t *cont = ptr;
273    size_t size = 0;
274    if (cont) {
275	size = sizeof(*cont);
276	if (cont->vm_stack) {
277#ifdef CAPTURE_JUST_VALID_VM_STACK
278	    size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
279#else
280	    size_t n = cont->saved_thread.stack_size;
281#endif
282	    size += n * sizeof(*cont->vm_stack);
283	}
284
285	if (cont->machine_stack) {
286	    size += cont->machine_stack_size * sizeof(*cont->machine_stack);
287	}
288#ifdef __ia64
289	if (cont->machine_register_stack) {
290	    size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
291	}
292#endif
293    }
294    return size;
295}
296
297static void
298fiber_mark(void *ptr)
299{
300    RUBY_MARK_ENTER("cont");
301    if (ptr) {
302	rb_fiber_t *fib = ptr;
303	rb_gc_mark(fib->prev);
304	cont_mark(&fib->cont);
305    }
306    RUBY_MARK_LEAVE("cont");
307}
308
309static void
310fiber_link_join(rb_fiber_t *fib)
311{
312    VALUE current_fibval = rb_fiber_current();
313    rb_fiber_t *current_fib;
314    GetFiberPtr(current_fibval, current_fib);
315
316    /* join fiber link */
317    fib->next_fiber = current_fib->next_fiber;
318    fib->prev_fiber = current_fib;
319    current_fib->next_fiber->prev_fiber = fib;
320    current_fib->next_fiber = fib;
321}
322
323static void
324fiber_link_remove(rb_fiber_t *fib)
325{
326    fib->prev_fiber->next_fiber = fib->next_fiber;
327    fib->next_fiber->prev_fiber = fib->prev_fiber;
328}
329
/* GC free callback for Fiber objects: release the fiber-local storage
 * table (non-root fibers only — the root fiber shares the thread's own
 * table), unlink from the global fiber list, then free the embedded
 * context via cont_free(). */
static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
	rb_fiber_t *fib = ptr;
	if (fib->cont.type != ROOT_FIBER_CONTEXT &&
	    fib->cont.saved_thread.local_storage) {
	    st_free_table(fib->cont.saved_thread.local_storage);
	}
	fiber_link_remove(fib);

	cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}
346
347static size_t
348fiber_memsize(const void *ptr)
349{
350    const rb_fiber_t *fib = ptr;
351    size_t size = 0;
352    if (ptr) {
353	size = sizeof(*fib);
354	if (fib->cont.type != ROOT_FIBER_CONTEXT) {
355	    size += st_memsize(fib->cont.saved_thread.local_storage);
356	}
357	size += cont_memsize(&fib->cont);
358    }
359    return size;
360}
361
362VALUE
363rb_obj_is_fiber(VALUE obj)
364{
365    if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
366	return Qtrue;
367    }
368    else {
369	return Qfalse;
370    }
371}
372
/* Copy the currently-live machine (C) stack of th into cont.  The live
 * region spans machine_stack_start to the just-sampled
 * machine_stack_end; either may be the lower address depending on the
 * platform's stack-growth direction.  On ia64 the register backing
 * store is captured as well. */
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;

    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    if (th->machine_stack_start > th->machine_stack_end) {
	/* stack grows downward: end is the lower bound of the live region */
	size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
	cont->machine_stack_src = th->machine_stack_end;
    }
    else {
	size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
	cont->machine_stack_src = th->machine_stack_start;
    }

    /* re-capture into an existing buffer when present */
    if (cont->machine_stack) {
	REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
	cont->machine_stack = ALLOC_N(VALUE, size);
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
	REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
	cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif
}
416
/* TypedData hooks for Continuation objects. */
static const rb_data_type_t cont_data_type = {
    "continuation",
    {cont_mark, cont_free, cont_memsize,},
};
421
/* Snapshot the thread's state into the context.  The machine-stack
 * bounds are cleared in the snapshot so a GC run before the context is
 * resumed does not scan a stale stack range. */
static void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont->saved_thread = *th;
    /* saved_thread->machine_stack_(start|end) should be NULL */
    /* because it may happen GC afterward */
    cont->saved_thread.machine_stack_start = 0;
    cont->saved_thread.machine_stack_end = 0;
#ifdef __ia64
    cont->saved_thread.machine_register_stack_start = 0;
    cont->saved_thread.machine_register_stack_end = 0;
#endif
}
436
/* Common context setup: snapshot the thread, then drop the snapshot's
 * reference to the thread's fiber-local table so the context never
 * owns (or frees) it. */
static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont_save_thread(cont, th);
    cont->saved_thread.local_storage = 0;
}
444
/* Allocate a new Continuation object of the given class and initialize
 * its context from the current thread.  Returns the C struct; the Ruby
 * object is reachable via cont->self. */
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}
458
/* Capture the current execution state into a new Continuation.
 * Returns twice, setjmp-style: on the initial capture *stat is set to
 * 0 and the Continuation object is returned; when the continuation is
 * later invoked, *stat is set to 1 and the transferred value is
 * returned.  An argc of -1 marks the value as an exception to
 * re-raise instead. */
static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    /* copy only the live portions: the value stack head and the control
     * frames at the tail of th->stack */
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    /* the snapshot must not point at the live VM stack */
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
	/* second return: the continuation was invoked */
	volatile VALUE value;

	value = cont->value;
	if (cont->argc == -1) rb_exc_raise(value);
	cont->value = Qnil;
	*stat = 1;
	return value;
    }
    else {
	/* first return: capture complete */
	*stat = 0;
	return contval;
    }
}
500
501static void
502cont_restore_thread(rb_context_t *cont)
503{
504    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;
505
506    /* restore thread context */
507    if (cont->type == CONTINUATION_CONTEXT) {
508	/* continuation */
509	VALUE fib;
510
511	th->fiber = sth->fiber;
512	fib = th->fiber ? th->fiber : th->root_fiber;
513
514	if (fib) {
515	    rb_fiber_t *fcont;
516	    GetFiberPtr(fib, fcont);
517	    th->stack_size = fcont->cont.saved_thread.stack_size;
518	    th->stack = fcont->cont.saved_thread.stack;
519	}
520#ifdef CAPTURE_JUST_VALID_VM_STACK
521	MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
522	MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
523	       cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
524#else
525	MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
526#endif
527    }
528    else {
529	/* fiber */
530	th->stack = sth->stack;
531	th->stack_size = sth->stack_size;
532	th->local_storage = sth->local_storage;
533	th->fiber = cont->self;
534    }
535
536    th->cfp = sth->cfp;
537    th->safe_level = sth->safe_level;
538    th->raised_flag = sth->raised_flag;
539    th->state = sth->state;
540    th->status = sth->status;
541    th->tag = sth->tag;
542    th->protect_tag = sth->protect_tag;
543    th->errinfo = sth->errinfo;
544    th->first_proc = sth->first_proc;
545    th->root_lep = sth->root_lep;
546    th->root_svar = sth->root_svar;
547}
548
549#if FIBER_USE_NATIVE
550#ifdef _WIN32
/* Record the current fiber's stack base as the thread's machine stack
 * start, derived from a local address rounded to a page boundary
 * (Windows native fibers only). */
static void
fiber_set_stack_location(void)
{
    rb_thread_t *th = GET_THREAD();
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    th->machine_stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}
560
/* Win32 fiber entry point: fix up the stack bookkeeping, then run the
 * fiber body. */
static VOID CALLBACK
fiber_entry(void *arg)
{
    fiber_set_stack_location();
    rb_fiber_start();
}
567#else /* _WIN32 */
568
569/*
570 * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL
571 * if MAP_STACK is passed.
572 * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
573 */
574#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
575#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
576#else
577#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
578#endif
579
/* Allocate a machine stack of `size' bytes for a fiber.  Reuses a
 * cached stack from machine_stack_cache when one of matching size is
 * available (cache entries record size in VALUE units — hence the
 * division); otherwise mmaps fresh memory and protects one guard page
 * at the deeper end of the stack to catch overflow. */
static char*
fiber_machine_stack_alloc(size_t size)
{
    char *ptr;

    if (machine_stack_cache_index > 0) {
	if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
	    ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
	    machine_stack_cache_index--;
	    machine_stack_cache[machine_stack_cache_index].ptr = NULL;
	    machine_stack_cache[machine_stack_cache_index].size = 0;
	}
	else{
            /* TODO handle multiple machine stack size */
	    rb_bug("machine_stack_cache size is not canonicalized");
	}
    }
    else {
	void *page;
	STACK_GROW_DIR_DETECTION;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
	if (ptr == MAP_FAILED) {
	    rb_raise(rb_eFiberError, "can't alloc machine stack to fiber");
	}

	/* guard page setup */
	page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
	if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
	    rb_raise(rb_eFiberError, "mprotect failed");
	}
    }

    return ptr;
}
615#endif
616
/* Create the native machine context for a fiber that is about to run
 * for the first time: a Win32 fiber handle (with one GC retry on
 * failure) or a POSIX ucontext with a freshly allocated machine stack.
 * The usable stack size excludes the guard page on POSIX. */
static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
    rb_thread_t *sth = &fib->cont.saved_thread;

#ifdef _WIN32
    fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
    if (!fib->fib_handle) {
	/* try to release unnecessary fibers & retry to create */
	rb_gc();
	fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
	if (!fib->fib_handle) {
	    rb_raise(rb_eFiberError, "can't create fiber");
	}
    }
    sth->machine_stack_maxsize = size;
#else /* not WIN32 */
    ucontext_t *context = &fib->context;
    char *ptr;
    STACK_GROW_DIR_DETECTION;

    getcontext(context);
    ptr = fiber_machine_stack_alloc(size);
    context->uc_link = NULL;
    context->uc_stack.ss_sp = ptr;
    context->uc_stack.ss_size = size;
    makecontext(context, rb_fiber_start, 0);
    sth->machine_stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
    sth->machine_stack_maxsize = size - RB_PAGE_SIZE;
#endif
#ifdef __ia64
    sth->machine_register_stack_maxsize = sth->machine_stack_maxsize;
#endif
}
651
652NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
653
654static void
655fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
656{
657    rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;
658
659    if (newfib->status != RUNNING) {
660	fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size);
661    }
662
663    /* restore thread context */
664    cont_restore_thread(&newfib->cont);
665    th->machine_stack_maxsize = sth->machine_stack_maxsize;
666    if (sth->machine_stack_end && (newfib != oldfib)) {
667	rb_bug("fiber_setcontext: sth->machine_stack_end has non zero value");
668    }
669
670    /* save  oldfib's machine stack */
671    if (oldfib->status != TERMINATED) {
672	STACK_GROW_DIR_DETECTION;
673	SET_MACHINE_STACK_END(&th->machine_stack_end);
674	if (STACK_DIR_UPPER(0, 1)) {
675	    oldfib->cont.machine_stack_size = th->machine_stack_start - th->machine_stack_end;
676	    oldfib->cont.machine_stack = th->machine_stack_end;
677	}
678	else {
679	    oldfib->cont.machine_stack_size = th->machine_stack_end - th->machine_stack_start;
680	    oldfib->cont.machine_stack = th->machine_stack_start;
681	}
682    }
683    /* exchange machine_stack_start between oldfib and newfib */
684    oldfib->cont.saved_thread.machine_stack_start = th->machine_stack_start;
685    th->machine_stack_start = sth->machine_stack_start;
686    /* oldfib->machine_stack_end should be NULL */
687    oldfib->cont.saved_thread.machine_stack_end = 0;
688#ifndef _WIN32
689    if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
690	rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
691    }
692#endif
693
694    /* swap machine context */
695#ifdef _WIN32
696    SwitchToFiber(newfib->fib_handle);
697#else
698    swapcontext(&oldfib->context, &newfib->context);
699#endif
700}
701#endif
702
NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

/* Second phase of continuation restore: the thread state is consistent,
 * so copy the saved machine stack back over the live one and longjmp to
 * the capture point.  Must run in a C frame deeper than the region
 * being overwritten — cont_restore_0 guarantees that. */
static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

    /* restore machine stack */
#ifdef _M_AMD64
    {
	/* workaround for x64 SEH: refresh the SEH frame pointer in the
	 * saved jmpbuf with the current frame's */
	jmp_buf buf;
	setjmp(buf);
	((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
	    ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
	FLUSH_REGISTER_WINDOWS;
	MEMCPY(cont->machine_stack_src, cont->machine_stack,
		VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
	MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
	       VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
735
736NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
737
738#ifdef __ia64
739#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
740#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
741static volatile int C(a), C(b), C(c), C(d), C(e);
742static volatile int C(f), C(g), C(h), C(i), C(j);
743static volatile int C(k), C(l), C(m), C(n), C(o);
744static volatile int C(p), C(q), C(r), C(s), C(t);
745#if 0
746{/* the above lines make cc-mode.el confused so much */}
747#endif
748int rb_dummy_false = 0;
749NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
/* ia64 only: recurse until the register backing store (BSP) has grown
 * past the saved register-stack region, so restoring it in
 * cont_restore_1 cannot clobber live frames.  The dead E(...) block
 * forces heavy register use per frame to make each recursion consume
 * backing-store space. */
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
769#undef C
770#undef E
771#endif
772
/* First phase of continuation restore: grow the C stack (by alloca or
 * by recursing in STACK_PAD_SIZE steps) until the current frame lies
 * beyond the saved machine-stack region, so cont_restore_1 can safely
 * overwrite that region.  Handles both stack-growth directions;
 * addr_in_prev_frame is used to detect the direction at runtime when
 * it is not known at compile time. */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
	VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
	if (addr_in_prev_frame > &space[0]) {
	    /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
	    volatile VALUE *const end = cont->machine_stack_src;
	    if (&space[0] > end) {
# ifdef HAVE_ALLOCA
		/* one alloca jumps straight past the region; the read of
		 * *sp keeps the allocation from being optimized away */
		volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
		space[0] = *sp;
# else
		cont_restore_0(cont, &space[0]);
# endif
	    }
#endif
#if !STACK_GROW_DIRECTION
	}
	else {
	    /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
	    volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
	    if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
		volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
		space[0] = *sp;
# else
		cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
	    }
#endif
#if !STACK_GROW_DIRECTION
	}
#endif
    }
    cont_restore_1(cont);
}
821#ifdef __ia64
822#define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
823#endif
824
825/*
826 *  Document-class: Continuation
827 *
828 *  Continuation objects are generated by Kernel#callcc,
829 *  after having +require+d <i>continuation</i>. They hold
830 *  a return address and execution context, allowing a nonlocal return
831 *  to the end of the <code>callcc</code> block from anywhere within a
832 *  program. Continuations are somewhat analogous to a structured
833 *  version of C's <code>setjmp/longjmp</code> (although they contain
834 *  more state, so you might consider them closer to threads).
835 *
836 *  For instance:
837 *
838 *     require "continuation"
839 *     arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
840 *     callcc{|cc| $cc = cc}
841 *     puts(message = arr.shift)
842 *     $cc.call unless message =~ /Max/
843 *
844 *  <em>produces:</em>
845 *
846 *     Freddie
847 *     Herbie
848 *     Ron
849 *     Max
850 *
851 *  This (somewhat contrived) example allows the inner loop to abandon
852 *  processing early:
853 *
854 *     require "continuation"
855 *     callcc {|cont|
856 *       for i in 0..4
857 *         print "\n#{i}: "
858 *         for j in i*5...(i+1)*5
859 *           cont.call() if j == 17
860 *           printf "%3d", j
861 *         end
862 *       end
863 *     }
864 *     puts
865 *
866 *  <em>produces:</em>
867 *
868 *     0:   0  1  2  3  4
869 *     1:   5  6  7  8  9
870 *     2:  10 11 12 13 14
871 *     3:  15 16
872 */
873
874/*
875 *  call-seq:
876 *     callcc {|cont| block }   ->  obj
877 *
878 *  Generates a Continuation object, which it passes to
879 *  the associated block. You need to <code>require
880 *  'continuation'</code> before using this method. Performing a
881 *  <em>cont</em><code>.call</code> will cause the #callcc
882 *  to return (as will falling through the end of the block). The
883 *  value returned by the #callcc is the value of the
884 *  block, or the value passed to <em>cont</em><code>.call</code>. See
885 *  class Continuation for more details. Also see
886 *  Kernel#throw for an alternative mechanism for
887 *  unwinding a call stack.
888 */
889
890static VALUE
891rb_callcc(VALUE self)
892{
893    volatile int called;
894    volatile VALUE val = cont_capture(&called);
895
896    if (called) {
897	return val;
898    }
899    else {
900	return rb_yield(val);
901    }
902}
903
904static VALUE
905make_passing_arg(int argc, VALUE *argv)
906{
907    switch (argc) {
908      case 0:
909	return Qnil;
910      case 1:
911	return argv[0];
912      default:
913	return rb_ary_new4(argc, argv);
914    }
915}
916
917/*
918 *  call-seq:
919 *     cont.call(args, ...)
920 *     cont[args, ...]
921 *
922 *  Invokes the continuation. The program continues from the end of the
923 *  <code>callcc</code> block. If no arguments are given, the original
924 *  <code>callcc</code> returns <code>nil</code>. If one argument is
925 *  given, <code>callcc</code> returns it. Otherwise, an array
926 *  containing <i>args</i> is returned.
927 *
928 *     callcc {|cont|  cont.call }           #=> nil
929 *     callcc {|cont|  cont.call 1 }         #=> 1
930 *     callcc {|cont|  cont.call 1, 2, 3 }   #=> [1, 2, 3]
931 */
932
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    /* a continuation may only be invoked on the thread/protect scope/fiber
     * it was captured on */
    if (cont->saved_thread.self != th->self) {
	rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.protect_tag != th->protect_tag) {
	rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_thread.fiber) {
	rb_fiber_t *fcont;
	/* fcont is unused beyond this point: GetFiberPtr serves to raise
	 * FiberError if the fiber is uninitialized */
	GetFiberPtr(cont->saved_thread.fiber, fcont);

	if (th->fiber != cont->saved_thread.fiber) {
	    rb_raise(rb_eRuntimeError, "continuation called across fiber");
	}
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    /* restore `tracing' context. see [Feature #4347] */
    th->trace_arg = cont->saved_thread.trace_arg;

    /* never returns: unwinds the machine stack and longjmps */
    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable */
}
964
965/*********/
966/* fiber */
967/*********/
968
969/*
970 *  Document-class: Fiber
971 *
972 *  Fibers are primitives for implementing light weight cooperative
973 *  concurrency in Ruby. Basically they are a means of creating code blocks
974 *  that can be paused and resumed, much like threads. The main difference
975 *  is that they are never preempted and that the scheduling must be done by
976 *  the programmer and not the VM.
977 *
978 *  As opposed to other stackless light weight concurrency models, each fiber
979 *  comes with a small 4KB stack. This enables the fiber to be paused from deeply
980 *  nested function calls within the fiber block.
981 *
 *  When a fiber is created it will not run automatically. Rather it must
 *  be explicitly asked to run using the <code>Fiber#resume</code> method.
984 *  The code running inside the fiber can give up control by calling
985 *  <code>Fiber.yield</code> in which case it yields control back to caller
986 *  (the caller of the <code>Fiber#resume</code>).
987 *
988 *  Upon yielding or termination the Fiber returns the value of the last
989 *  executed expression
990 *
991 *  For instance:
992 *
993 *    fiber = Fiber.new do
994 *      Fiber.yield 1
995 *      2
996 *    end
997 *
998 *    puts fiber.resume
999 *    puts fiber.resume
1000 *    puts fiber.resume
1001 *
1002 *  <em>produces</em>
1003 *
1004 *    1
1005 *    2
1006 *    FiberError: dead fiber called
1007 *
1008 *  The <code>Fiber#resume</code> method accepts an arbitrary number of
1009 *  parameters, if it is the first call to <code>resume</code> then they
1010 *  will be passed as block arguments. Otherwise they will be the return
1011 *  value of the call to <code>Fiber.yield</code>
1012 *
1013 *  Example:
1014 *
1015 *    fiber = Fiber.new do |first|
1016 *      second = Fiber.yield first + 2
1017 *    end
1018 *
1019 *    puts fiber.resume 10
1020 *    puts fiber.resume 14
1021 *    puts fiber.resume 18
1022 *
1023 *  <em>produces</em>
1024 *
1025 *    12
1026 *    14
1027 *    FiberError: dead fiber called
1028 *
1029 */
1030
/* TypedData hooks for Fiber objects. */
static const rb_data_type_t fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize,},
};
1035
/* Allocate an empty Fiber wrapper; the rb_fiber_t is attached later by
 * fiber_t_alloc (DATA_PTR stays NULL until then). */
static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}
1041
1042static rb_fiber_t*
1043fiber_t_alloc(VALUE fibval)
1044{
1045    rb_fiber_t *fib;
1046    rb_thread_t *th = GET_THREAD();
1047
1048    if (DATA_PTR(fibval) != 0) {
1049	rb_raise(rb_eRuntimeError, "cannot initialize twice");
1050    }
1051
1052    THREAD_MUST_BE_RUNNING(th);
1053    fib = ALLOC(rb_fiber_t);
1054    memset(fib, 0, sizeof(rb_fiber_t));
1055    fib->cont.self = fibval;
1056    fib->cont.type = FIBER_CONTEXT;
1057    cont_init(&fib->cont, th);
1058    fib->prev = Qnil;
1059    fib->status = CREATED;
1060
1061    DATA_PTR(fibval) = fib;
1062
1063    return fib;
1064}
1065
/* Build the initial VM state for a new fiber: allocate a fresh VM
 * stack and push a single dummy control frame at its top so the fiber
 * can start executing proc on first resume.  The frame layout here
 * mirrors what the VM expects for a bottom frame. */
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize cont */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    th->stack_size = th->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
    th->stack = ALLOC_N(VALUE, th->stack_size);

    /* place one dummy control frame at the top of the new stack */
    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
#if VM_DEBUG_BP_CHECK
    th->cfp->bp_check = 0;
#endif
    th->cfp->ep = th->stack;
    *th->cfp->ep = VM_ENVVAL_BLOCK_PTR(0);
    th->cfp->self = Qnil;
    th->cfp->klass = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    /* the proc to run when the fiber is first resumed */
    th->first_proc = proc;

#if !FIBER_USE_NATIVE
    /* non-native fibers start from a copy of the thread's root jmpbuf */
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}
1111
1112/* :nodoc: */
1113static VALUE
1114rb_fiber_init(VALUE fibval)
1115{
1116    return fiber_init(fibval, rb_block_proc());
1117}
1118
1119VALUE
1120rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
1121{
1122    return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
1123}
1124
1125static VALUE
1126return_fiber(void)
1127{
1128    rb_fiber_t *fib;
1129    VALUE curr = rb_fiber_current();
1130    VALUE prev;
1131    GetFiberPtr(curr, fib);
1132
1133    prev = fib->prev;
1134    if (NIL_P(prev)) {
1135	const VALUE root_fiber = GET_THREAD()->root_fiber;
1136
1137	if (root_fiber == curr) {
1138	    rb_raise(rb_eFiberError, "can't yield from root fiber");
1139	}
1140	return root_fiber;
1141    }
1142    else {
1143	fib->prev = Qnil;
1144	return prev;
1145    }
1146}
1147
1148VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
1149
/* Mark FIB as finished and switch to its return fiber, carrying the
 * fiber block's final value.  Does not return to the caller. */
static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
#if FIBER_USE_NATIVE && !defined(_WIN32)
    /* Ruby must not switch to other thread until storing terminated_machine_stack */
    terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
    terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
    /* detach the machine stack; fiber_store() will cache or unmap it */
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine_stack = NULL;
    fib->cont.machine_stack_size = 0;
#endif
    rb_fiber_transfer(return_fiber(), 1, &value);
}
1165
/* Entry point of every non-root fiber.  Invokes the fiber's proc with
 * the arguments stored by the first resume, captures abnormal exits
 * (raise/throw/break...) and queues them as pending interrupts on the
 * thread, then terminates the fiber.  Never returns. */
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
	int argc;
	VALUE *argv, args;
	GetProcPtr(cont->saved_thread.first_proc, proc);
	args = cont->value;
	/* one argument is passed as-is; several are spread from an Array */
	argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
	cont->value = Qnil;
	th->errinfo = Qnil;
	th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
	th->root_svar = Qnil;

	fib->status = RUNNING;
	cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
	/* abnormal exit: forward the exception (or a jump tag converted
	 * to an exception) to the context that resumes next */
	if (state == TAG_RAISE || state == TAG_FATAL) {
	    rb_threadptr_pending_interrupt_enque(th, th->errinfo);
	}
	else {
	    VALUE err = rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
	    if (!NIL_P(err))
		rb_threadptr_pending_interrupt_enque(th, err);
	}
	RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}
1210
1211static rb_fiber_t *
1212root_fiber_alloc(rb_thread_t *th)
1213{
1214    rb_fiber_t *fib;
1215    /* no need to allocate vm stack */
1216    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
1217    fib->cont.type = ROOT_FIBER_CONTEXT;
1218#if FIBER_USE_NATIVE
1219#ifdef _WIN32
1220    fib->fib_handle = ConvertThreadToFiber(0);
1221#endif
1222#endif
1223    fib->status = RUNNING;
1224    fib->prev_fiber = fib->next_fiber = fib;
1225
1226    return fib;
1227}
1228
1229VALUE
1230rb_fiber_current(void)
1231{
1232    rb_thread_t *th = GET_THREAD();
1233    if (th->fiber == 0) {
1234	/* save root */
1235	rb_fiber_t *fib = root_fiber_alloc(th);
1236	th->root_fiber = th->fiber = fib->cont.self;
1237    }
1238    return th->fiber;
1239}
1240
/* Save the current fiber's context and switch to NEXT_FIB.  Returns the
 * value handed over by whichever switch eventually resumes this fiber
 * (raising instead if that switch carried an exception, argc == -1).
 * In the non-native build the outgoing path returns Qundef and the
 * caller performs the longjmp-based restore. */
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
	GetFiberPtr(th->fiber, fib);
	cont_save_thread(&fib->cont, th);
    }
    else {
	/* create current fiber */
	fib = root_fiber_alloc(th);
	th->root_fiber = th->fiber = fib->cont.self;
    }

#if !FIBER_USE_NATIVE
    cont_save_machine_stack(th, &fib->cont);
#endif

    /* native: switch unconditionally; non-native: setjmp returns 0 on
     * the outgoing path and non-zero when this fiber is restored */
    if (FIBER_USE_NATIVE || ruby_setjmp(fib->cont.jmpbuf)) {
#if FIBER_USE_NATIVE
	fiber_setcontext(next_fib, fib);
#ifndef _WIN32
	/* reclaim the machine stack of a fiber that just terminated:
	 * cache it for reuse when there is room, otherwise unmap it */
	if (terminated_machine_stack.ptr) {
	    if (machine_stack_cache_index < MAX_MAHINE_STACK_CACHE) {
		machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
		machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
		machine_stack_cache_index++;
	    }
	    else {
		if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
		    munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
		}
		else {
		    rb_bug("terminated fiber resumed");
		}
	    }
	    terminated_machine_stack.ptr = NULL;
	    terminated_machine_stack.size = 0;
	}
#endif
#endif
	/* restored */
	GetFiberPtr(th->fiber, fib);
	if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
	return fib->cont.value;
    }
#if !FIBER_USE_NATIVE
    else {
	return Qundef;
    }
#endif
}
1295
/* Core switch routine behind resume/transfer/yield.  Validates the
 * destination fiber, records the resume link when IS_RESUME, stores the
 * passed arguments in the destination context, and performs the switch.
 * Returns the value handed back when this fiber is next switched to. */
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (th->fiber == fibval) {
	/* ignore fiber context switch
         * because destination fiber is same as current fiber
	 */
	return make_passing_arg(argc, argv);
    }

    if (cont->saved_thread.self != th->self) {
	rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
	rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
	/* switching to a dead fiber: raise FiberError in the caller if
	 * it is alive, otherwise deliver the error via a switch */
	value = rb_exc_new2(rb_eFiberError, "dead fiber called");
	if (th->fiber != fibval) {
	    GetFiberPtr(th->fiber, fib);
	    if (fib->status != TERMINATED) rb_exc_raise(value);
	    /* current fiber is dead too: route the error to the root */
	    fibval = th->root_fiber;
	}
	else {
	    /* NOTE(review): looks unreachable — the th->fiber == fibval
	     * case returned earlier above; confirm before relying on it */
	    fibval = fib->prev;
	    if (NIL_P(fibval)) fibval = th->root_fiber;
	}
	GetFiberPtr(fibval, fib);
	cont = &fib->cont;
	cont->argc = -1;		/* -1 flags "value is an exception" */
	cont->value = value;
#if FIBER_USE_NATIVE
	{
	    VALUE oldfibval;
	    rb_fiber_t *oldfib;
	    oldfibval = rb_fiber_current();
	    GetFiberPtr(oldfibval, oldfib);
	    fiber_setcontext(fib, oldfib);
	}
#else
	cont_restore_0(cont, &value);
#endif
    }

    if (is_resume) {
	fib->prev = rb_fiber_current();
    }
    else {
	/* restore `tracing' context. see [Feature #4347] */
	th->trace_arg = cont->saved_thread.trace_arg;
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    value = fiber_store(fib);
#if !FIBER_USE_NATIVE
    if (value == Qundef) {
	/* outgoing path of the setjmp-based switch: jump now */
	cont_restore_0(cont, &value);
	rb_bug("rb_fiber_resume: unreachable");
    }
#endif
    RUBY_VM_CHECK_INTS(th);

    return value;
}
1370
1371VALUE
1372rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
1373{
1374    return fiber_switch(fib, argc, argv, 0);
1375}
1376
1377VALUE
1378rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
1379{
1380    rb_fiber_t *fib;
1381    GetFiberPtr(fibval, fib);
1382
1383    if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
1384	rb_raise(rb_eFiberError, "double resume");
1385    }
1386    if (fib->transfered != 0) {
1387	rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
1388    }
1389
1390    return fiber_switch(fibval, argc, argv, 1);
1391}
1392
1393VALUE
1394rb_fiber_yield(int argc, VALUE *argv)
1395{
1396    return rb_fiber_transfer(return_fiber(), argc, argv);
1397}
1398
1399void
1400rb_fiber_reset_root_local_storage(VALUE thval)
1401{
1402    rb_thread_t *th;
1403    rb_fiber_t	*fib;
1404
1405    GetThreadPtr(thval, th);
1406    if (th->root_fiber && th->root_fiber != th->fiber) {
1407	GetFiberPtr(th->root_fiber, fib);
1408	th->local_storage = fib->cont.saved_thread.local_storage;
1409    }
1410}
1411
1412/*
1413 *  call-seq:
1414 *     fiber.alive? -> true or false
1415 *
1416 *  Returns true if the fiber can still be resumed (or transferred
1417 *  to). After finishing execution of the fiber block this method will
1418 *  always return false. You need to <code>require 'fiber'</code>
1419 *  before using this method.
1420 */
1421VALUE
1422rb_fiber_alive_p(VALUE fibval)
1423{
1424    rb_fiber_t *fib;
1425    GetFiberPtr(fibval, fib);
1426    return fib->status != TERMINATED ? Qtrue : Qfalse;
1427}
1428
1429/*
1430 *  call-seq:
1431 *     fiber.resume(args, ...) -> obj
1432 *
1433 *  Resumes the fiber from the point at which the last <code>Fiber.yield</code>
1434 *  was called, or starts running it if it is the first call to
1435 *  <code>resume</code>. Arguments passed to resume will be the value of
1436 *  the <code>Fiber.yield</code> expression or will be passed as block
1437 *  parameters to the fiber's block if this is the first <code>resume</code>.
1438 *
1439 *  Alternatively, when resume is called it evaluates to the arguments passed
1440 *  to the next <code>Fiber.yield</code> statement inside the fiber's block
1441 *  or to the block value if it runs to completion without any
1442 *  <code>Fiber.yield</code>
1443 */
1444static VALUE
1445rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
1446{
1447    return rb_fiber_resume(fib, argc, argv);
1448}
1449
1450/*
1451 *  call-seq:
1452 *     fiber.transfer(args, ...) -> obj
1453 *
1454 *  Transfer control to another fiber, resuming it from where it last
1455 *  stopped or starting it if it was not resumed before. The calling
1456 *  fiber will be suspended much like in a call to
1457 *  <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
1458 *  before using this method.
1459 *
 *  The fiber which receives the transfer call treats it much like
1461 *  a resume call. Arguments passed to transfer are treated like those
1462 *  passed to resume.
1463 *
1464 *  You cannot resume a fiber that transferred control to another one.
1465 *  This will cause a double resume error. You need to transfer control
1466 *  back to this fiber before it can yield and resume.
1467 *
1468 *  Example:
1469 *
1470 *    fiber1 = Fiber.new do
1471 *      puts "In Fiber 1"
1472 *      Fiber.yield
1473 *    end
1474 *
1475 *    fiber2 = Fiber.new do
1476 *      puts "In Fiber 2"
1477 *      fiber1.transfer
1478 *      puts "Never see this message"
1479 *    end
1480 *
1481 *    fiber3 = Fiber.new do
1482 *      puts "In Fiber 3"
1483 *    end
1484 *
1485 *    fiber2.resume
1486 *    fiber3.resume
1487 *
 *  <em>produces</em>
 *
 *    In Fiber 2
 *    In Fiber 1
 *    In Fiber 3
1493 *
1494 */
1495static VALUE
1496rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
1497{
1498    rb_fiber_t *fib;
1499    GetFiberPtr(fibval, fib);
1500    fib->transfered = 1;
1501    return rb_fiber_transfer(fibval, argc, argv);
1502}
1503
1504/*
1505 *  call-seq:
1506 *     Fiber.yield(args, ...) -> obj
1507 *
1508 *  Yields control back to the context that resumed the fiber, passing
1509 *  along any arguments that were passed to it. The fiber will resume
1510 *  processing at this point when <code>resume</code> is called next.
1511 *  Any arguments passed to the next <code>resume</code> will be the
1512 *  value that this <code>Fiber.yield</code> expression evaluates to.
1513 */
1514static VALUE
1515rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
1516{
1517    return rb_fiber_yield(argc, argv);
1518}
1519
1520/*
1521 *  call-seq:
1522 *     Fiber.current() -> fiber
1523 *
1524 *  Returns the current fiber. You need to <code>require 'fiber'</code>
1525 *  before using this method. If you are not running in the context of
1526 *  a fiber this method will return the root fiber.
1527 */
1528static VALUE
1529rb_fiber_s_current(VALUE klass)
1530{
1531    return rb_fiber_current();
1532}
1533
1534
1535
1536/*
1537 *  Document-class: FiberError
1538 *
1539 *  Raised when an invalid operation is attempted on a Fiber, in
1540 *  particular when attempting to call/resume a dead fiber,
1541 *  attempting to yield from the root fiber, or calling a fiber across
1542 *  threads.
1543 *
1544 *     fiber = Fiber.new{}
1545 *     fiber.resume #=> nil
1546 *     fiber.resume #=> FiberError: dead fiber called
1547 */
1548
/* Register the Fiber class, FiberError, and the core Fiber methods.
 * For native fibers, also record the system page size used when
 * allocating machine stacks. */
void
Init_Cont(void)
{
#if FIBER_USE_NATIVE
    rb_thread_t *th = GET_THREAD();

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else /* not WIN32 */
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->machine_stack_end);
#endif

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}
1572
1573#if defined __GNUC__ && __GNUC__ >= 4
1574#pragma GCC visibility push(default)
1575#endif
1576
/* Register the Continuation class and Kernel#callcc.  Called lazily
 * (e.g. via require 'continuation'), not from Init_Cont. */
void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}
1587
/* Register the coroutine-style Fiber API (#transfer, #alive?,
 * Fiber.current), made available via require 'fiber'. */
void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}
1595
1596#if defined __GNUC__ && __GNUC__ >= 4
1597#pragma GCC visibility pop
1598#endif
1599