Ruby 2.0.0p481 (2014-05-08 revision 45883)
cont.c — continuation and fiber implementation
(extracted from the generated documentation of this file)
00001 /**********************************************************************
00002 
00003   cont.c -
00004 
00005   $Author: nagachika $
00006   created at: Thu May 23 09:03:43 2007
00007 
00008   Copyright (C) 2007 Koichi Sasada
00009 
00010 **********************************************************************/
00011 
00012 #include "ruby/ruby.h"
00013 #include "internal.h"
00014 #include "vm_core.h"
00015 #include "gc.h"
00016 #include "eval_intern.h"
00017 
/* FIBER_USE_NATIVE enables a Fiber performance improvement that uses
 * system-dependent methods such as make/setcontext on POSIX systems or
 * the CreateFiber() API on Windows.
 * This hack makes Fiber context switching faster (x2 or more).
 * However, it decreases the maximum number of Fibers.  For example, on a
 * 32bit POSIX OS, only ten or twenty thousand Fibers can be created.
 *
 * Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
 * in Proc. of 51th Programming Symposium, pp.21--28 (2010) (in Japanese).
 */

#if !defined(FIBER_USE_NATIVE)
# if defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT)
#   if 0
#   elif defined(__NetBSD__)
/* In our experience, NetBSD doesn't support using setcontext() and pthread
 * simultaneously.  This is because pthread_self(), TLS and other information
 * are represented by the stack pointer (higher bits of the stack pointer).
 * TODO: check such constraint on configure.
 */
#     define FIBER_USE_NATIVE 0
#   elif defined(__sun)
/* Disabled on Solaris because resuming any Fiber caused SEGV, for some
 * reason.
 */
#     define FIBER_USE_NATIVE 0
#   elif defined(__ia64)
/* At least, Linux/ia64's getcontext(3) doesn't save the register window.
 */
#     define FIBER_USE_NATIVE 0
#   elif defined(__GNU__)
/* GNU/Hurd doesn't fully support getcontext, setcontext, makecontext
 * and swapcontext functions. Disabling their usage till support is
 * implemented. More info at
 * http://darnassus.sceen.net/~hurd-web/open_issues/glibc/#getcontext
 */
#     define FIBER_USE_NATIVE 0
#   else
#     define FIBER_USE_NATIVE 1
#   endif
# elif defined(_WIN32)
#   if _WIN32_WINNT >= 0x0400
/* Fiber APIs are supported only when building (and running) with
 * _WIN32_WINNT >= 0x0400.
 * [ruby-dev:41192]
 */
#     define FIBER_USE_NATIVE 1
#   endif
# endif
#endif
#if !defined(FIBER_USE_NATIVE)
#define FIBER_USE_NATIVE 0
#endif

#if FIBER_USE_NATIVE
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#include <ucontext.h>
#endif
/* System page size, used to size and align fiber stack guard pages.
 * NOTE(review): presumably initialized during fiber/continuation Init
 * code outside this chunk — confirm. */
#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
static long pagesize;
#endif /*FIBER_USE_NATIVE*/
00081 
/* When defined, a continuation captures only the live portion of the VM
 * stack (value head + control-frame tail) instead of the whole allocation. */
#define CAPTURE_JUST_VALID_VM_STACK 1

/* Kind of execution context held by an rb_context_t. */
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};
00089 
/* Saved execution context of a Continuation, or (embedded as the first
 * member of rb_fiber_t) of a Fiber. */
typedef struct rb_context_struct {
    enum context_type type;
    VALUE self;            /* the wrapping Continuation/Fiber object */
    int argc;              /* arg count passed on invocation; -1 requests raising `value' */
    VALUE value;           /* value delivered back at the capture point */
    VALUE *vm_stack;       /* heap copy of the Ruby VM stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;  /* length of stack (head of th->stack) */
    size_t vm_stack_clen;  /* length of control frames (tail of th->stack) */
#endif
    VALUE *machine_stack;      /* heap copy of the C (machine) stack */
    VALUE *machine_stack_src;  /* original address the copy was taken from */
#ifdef __ia64
    VALUE *machine_register_stack;      /* copy of the ia64 register backing store */
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;  /* snapshot of the owning thread's state */
    rb_jmpbuf_t jmpbuf;        /* longjmp target used to re-enter the context */
    size_t machine_stack_size;
} rb_context_t;
00111 
/* Lifecycle states of a Fiber. */
enum fiber_status {
    CREATED,
    RUNNING,
    TERMINATED
};
00117 
#if FIBER_USE_NATIVE && !defined(_WIN32)
/* NOTE(review): "MAHINE" is a typo for "MACHINE"; left unchanged because
 * the macro may be referenced elsewhere in this file beyond this chunk. */
#define MAX_MAHINE_STACK_CACHE  10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
    void *ptr;
    size_t size;
} machine_stack_cache_t;
/* Cache of machine stacks released by terminated fibers, kept for reuse
 * by fiber_machine_stack_alloc(). */
static machine_stack_cache_t machine_stack_cache[MAX_MAHINE_STACK_CACHE];
static machine_stack_cache_t terminated_machine_stack;
#endif
00128 
typedef struct rb_fiber_struct {
    rb_context_t cont;  /* must stay first: fibers are cast to/from rb_context_t */
    VALUE prev;         /* previous fiber object (presumably the resume target — confirm) */
    enum fiber_status status;
    /* doubly-linked ring of fibers, joined/removed by fiber_link_join/remove */
    struct rb_fiber_struct *prev_fiber;
    struct rb_fiber_struct *next_fiber;
    /* If a fiber invokes "transfer",
     * then this fiber can't "resume" any more after that.
     * You shouldn't mix "transfer" and "resume".
     */
    int transfered;

#if FIBER_USE_NATIVE
#ifdef _WIN32
    void *fib_handle;   /* Win32 fiber handle from CreateFiberEx() */
#else
    ucontext_t context; /* POSIX ucontext used by swapcontext() */
#endif
#endif
} rb_fiber_t;
00149 
static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

/* Extract the rb_context_t pointer from a Continuation object. */
#define GetContPtr(obj, ptr)  \
    TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))

/* Extract the rb_fiber_t pointer from a Fiber object; raises FiberError
 * if the fiber was never initialized. */
#define GetFiberPtr(obj, ptr)  do {\
    TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
    if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while (0)

NOINLINE(static VALUE cont_capture(volatile int *stat));

/* A thread without a tag is not executing Ruby code and cannot be captured. */
#define THREAD_MUST_BE_RUNNING(th) do { \
        if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread");        \
    } while (0)
00168 
00169 static void
00170 cont_mark(void *ptr)
00171 {
00172     RUBY_MARK_ENTER("cont");
00173     if (ptr) {
00174         rb_context_t *cont = ptr;
00175         rb_gc_mark(cont->value);
00176         rb_thread_mark(&cont->saved_thread);
00177         rb_gc_mark(cont->saved_thread.self);
00178 
00179         if (cont->vm_stack) {
00180 #ifdef CAPTURE_JUST_VALID_VM_STACK
00181             rb_gc_mark_locations(cont->vm_stack,
00182                                  cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
00183 #else
00184             rb_gc_mark_localtion(cont->vm_stack,
00185                                  cont->vm_stack, cont->saved_thread.stack_size);
00186 #endif
00187         }
00188 
00189         if (cont->machine_stack) {
00190             if (cont->type == CONTINUATION_CONTEXT) {
00191                 /* cont */
00192                 rb_gc_mark_locations(cont->machine_stack,
00193                                      cont->machine_stack + cont->machine_stack_size);
00194             }
00195             else {
00196                 /* fiber */
00197                 rb_thread_t *th;
00198                 rb_fiber_t *fib = (rb_fiber_t*)cont;
00199                 GetThreadPtr(cont->saved_thread.self, th);
00200                 if ((th->fiber != cont->self) && fib->status == RUNNING) {
00201                     rb_gc_mark_locations(cont->machine_stack,
00202                                          cont->machine_stack + cont->machine_stack_size);
00203                 }
00204             }
00205         }
00206 #ifdef __ia64
00207         if (cont->machine_register_stack) {
00208             rb_gc_mark_locations(cont->machine_register_stack,
00209                                  cont->machine_register_stack + cont->machine_register_stack_size);
00210         }
00211 #endif
00212     }
00213     RUBY_MARK_LEAVE("cont");
00214 }
00215 
00216 static void
00217 cont_free(void *ptr)
00218 {
00219     RUBY_FREE_ENTER("cont");
00220     if (ptr) {
00221         rb_context_t *cont = ptr;
00222         RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout);
00223 #if FIBER_USE_NATIVE
00224         if (cont->type == CONTINUATION_CONTEXT) {
00225             /* cont */
00226             RUBY_FREE_UNLESS_NULL(cont->machine_stack);
00227         }
00228         else {
00229             /* fiber */
00230 #ifdef _WIN32
00231             if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
00232                 /* don't delete root fiber handle */
00233                 rb_fiber_t *fib = (rb_fiber_t*)cont;
00234                 if (fib->fib_handle) {
00235                     DeleteFiber(fib->fib_handle);
00236                 }
00237             }
00238 #else /* not WIN32 */
00239             if (GET_THREAD()->fiber != cont->self) {
00240                 rb_fiber_t *fib = (rb_fiber_t*)cont;
00241                 if (fib->context.uc_stack.ss_sp) {
00242                     if (cont->type == ROOT_FIBER_CONTEXT) {
00243                         rb_bug("Illegal root fiber parameter");
00244                     }
00245                     munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size);
00246                 }
00247             }
00248             else {
00249                 /* It may reached here when finalize */
00250                 /* TODO examine whether it is a bug */
00251                 /* rb_bug("cont_free: release self"); */
00252             }
00253 #endif
00254         }
00255 #else /* not FIBER_USE_NATIVE */
00256         RUBY_FREE_UNLESS_NULL(cont->machine_stack);
00257 #endif
00258 #ifdef __ia64
00259         RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
00260 #endif
00261         RUBY_FREE_UNLESS_NULL(cont->vm_stack);
00262 
00263         /* free rb_cont_t or rb_fiber_t */
00264         ruby_xfree(ptr);
00265     }
00266     RUBY_FREE_LEAVE("cont");
00267 }
00268 
00269 static size_t
00270 cont_memsize(const void *ptr)
00271 {
00272     const rb_context_t *cont = ptr;
00273     size_t size = 0;
00274     if (cont) {
00275         size = sizeof(*cont);
00276         if (cont->vm_stack) {
00277 #ifdef CAPTURE_JUST_VALID_VM_STACK
00278             size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
00279 #else
00280             size_t n = cont->saved_thread.stack_size;
00281 #endif
00282             size += n * sizeof(*cont->vm_stack);
00283         }
00284 
00285         if (cont->machine_stack) {
00286             size += cont->machine_stack_size * sizeof(*cont->machine_stack);
00287         }
00288 #ifdef __ia64
00289         if (cont->machine_register_stack) {
00290             size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
00291         }
00292 #endif
00293     }
00294     return size;
00295 }
00296 
00297 static void
00298 fiber_mark(void *ptr)
00299 {
00300     RUBY_MARK_ENTER("cont");
00301     if (ptr) {
00302         rb_fiber_t *fib = ptr;
00303         rb_gc_mark(fib->prev);
00304         cont_mark(&fib->cont);
00305     }
00306     RUBY_MARK_LEAVE("cont");
00307 }
00308 
00309 static void
00310 fiber_link_join(rb_fiber_t *fib)
00311 {
00312     VALUE current_fibval = rb_fiber_current();
00313     rb_fiber_t *current_fib;
00314     GetFiberPtr(current_fibval, current_fib);
00315 
00316     /* join fiber link */
00317     fib->next_fiber = current_fib->next_fiber;
00318     fib->prev_fiber = current_fib;
00319     current_fib->next_fiber->prev_fiber = fib;
00320     current_fib->next_fiber = fib;
00321 }
00322 
00323 static void
00324 fiber_link_remove(rb_fiber_t *fib)
00325 {
00326     fib->prev_fiber->next_fiber = fib->next_fiber;
00327     fib->next_fiber->prev_fiber = fib->prev_fiber;
00328 }
00329 
/* Free function for Fiber objects: releases the fiber-local storage table
 * (non-root fibers only), unlinks the fiber from the fiber ring, then
 * frees the embedded context (which also frees the whole struct). */
static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        if (fib->cont.type != ROOT_FIBER_CONTEXT &&
            fib->cont.saved_thread.local_storage) {
            st_free_table(fib->cont.saved_thread.local_storage);
        }
        /* must unlink before cont_free: cont_free releases the struct */
        fiber_link_remove(fib);

        cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}
00346 
00347 static size_t
00348 fiber_memsize(const void *ptr)
00349 {
00350     const rb_fiber_t *fib = ptr;
00351     size_t size = 0;
00352     if (ptr) {
00353         size = sizeof(*fib);
00354         if (fib->cont.type != ROOT_FIBER_CONTEXT) {
00355             size += st_memsize(fib->cont.saved_thread.local_storage);
00356         }
00357         size += cont_memsize(&fib->cont);
00358     }
00359     return size;
00360 }
00361 
00362 VALUE
00363 rb_obj_is_fiber(VALUE obj)
00364 {
00365     if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
00366         return Qtrue;
00367     }
00368     else {
00369         return Qfalse;
00370     }
00371 }
00372 
/* Copy the thread's current C (machine) stack into cont so that it can be
 * written back later by cont_restore_1().  The growth direction is derived
 * from comparing machine_stack_start and machine_stack_end. */
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;

    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    if (th->machine_stack_start > th->machine_stack_end) {
        /* stack grows downward: end is the lower address */
        size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
        cont->machine_stack_src = th->machine_stack_end;
    }
    else {
        /* stack grows upward: start is the lower address */
        size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
        cont->machine_stack_src = th->machine_stack_start;
    }

    /* reuse (resize) the buffer if this context was captured before */
    if (cont->machine_stack) {
        REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
        cont->machine_stack = ALLOC_N(VALUE, size);
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    /* ia64 keeps a separate register backing store; flush and save it too */
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
        REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
        cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif
}
00416 
/* TypedData hooks (mark/free/memsize) for Continuation objects. */
static const rb_data_type_t cont_data_type = {
    "continuation",
    {cont_mark, cont_free, cont_memsize,},
};
00421 
/* Snapshot the whole thread struct into cont->saved_thread, clearing the
 * machine-stack boundary pointers so a GC between capture and restore
 * does not scan a stale stack region. */
static void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont->saved_thread = *th;
    /* saved_thread->machine_stack_(start|end) should be NULL */
    /* because it may happen GC afterward */
    cont->saved_thread.machine_stack_start = 0;
    cont->saved_thread.machine_stack_end = 0;
#ifdef __ia64
    cont->saved_thread.machine_register_stack_start = 0;
    cont->saved_thread.machine_register_stack_end = 0;
#endif
}
00436 
/* Initialize a freshly allocated context: snapshot the thread and detach
 * the snapshot from the thread's fiber-local storage (the context does
 * not own it). */
static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save thread context */
    cont_save_thread(cont, th);
    cont->saved_thread.local_storage = 0;
}
00444 
/* Allocate a new wrapped context of class `klass' for the current thread.
 * contval is volatile so the wrapping object stays GC-reachable while the
 * raw pointer is being initialized. */
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}
00458 
/* Capture the current execution context (VM stack + machine stack) into a
 * new Continuation.  setjmp-style double return: on the initial call,
 * *stat is set to 0 and the Continuation object is returned; when the
 * continuation is later invoked, control longjmps back here, *stat is set
 * to 1, and the value passed to the invocation is returned (or raised,
 * when argc == -1). */
static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;  /* volatile: must survive the longjmp return */

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    /* copy only the live VM stack: value slots at the head, control
     * frames at the tail */
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    /* the snapshot must not point at the live stack */
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        /* second return: the continuation was invoked */
        volatile VALUE value;

        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        /* first return: capture complete */
        *stat = 0;
        return contval;
    }
}
00500 
/* Write the saved thread state in cont back into the current thread.  For
 * a continuation this copies the captured VM stack back over the stack of
 * whichever fiber owns it; for a fiber it just repoints the thread at the
 * fiber's own stack and storage. */
static void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore thread context */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            /* the VM stack to restore onto belongs to the target fiber */
            rb_fiber_t *fcont;
            GetFiberPtr(fib, fcont);
            th->stack_size = fcont->cont.saved_thread.stack_size;
            th->stack = fcont->cont.saved_thread.stack;
        }
#ifdef CAPTURE_JUST_VALID_VM_STACK
        /* head (values) to the bottom, tail (control frames) to the top */
        MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
        MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
               cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
        /* fiber */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;
    th->root_lep = sth->root_lep;
    th->root_svar = sth->root_svar;
}
00548 
00549 #if FIBER_USE_NATIVE
00550 #ifdef _WIN32
/* Win32: record the page-aligned base of the fiber's machine stack as the
 * thread's stack start, derived from a local's address. */
static void
fiber_set_stack_location(void)
{
    rb_thread_t *th = GET_THREAD();
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    th->machine_stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}
00560 
/* Win32 fiber entry point: fix up the stack bookkeeping, then run the
 * fiber body. */
static VOID CALLBACK
fiber_entry(void *arg)
{
    fiber_set_stack_location();
    rb_fiber_start();
}
00567 #else /* _WIN32 */
00568 
/*
 * FreeBSD requires that the first (i.e. addr) argument of mmap(2) is not
 * NULL if MAP_STACK is passed, so MAP_STACK is not used there.
 * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
 */
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif
00579 
/* Allocate a machine stack for a fiber: reuse a cached stack from a
 * terminated fiber when available, otherwise mmap a fresh region and
 * protect a guard page at its far (growth) end. */
static char*
fiber_machine_stack_alloc(size_t size)
{
    char *ptr;

    if (machine_stack_cache_index > 0) {
        /* NOTE(review): cache entries appear to store sizes in VALUE units
         * (size / sizeof(VALUE)) — confirm against the code that populates
         * machine_stack_cache elsewhere in this file. */
        if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
            ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
            machine_stack_cache_index--;
            machine_stack_cache[machine_stack_cache_index].ptr = NULL;
            machine_stack_cache[machine_stack_cache_index].size = 0;
        }
        else{
            /* TODO handle multiple machine stack size */
            rb_bug("machine_stack_cache size is not canonicalized");
        }
    }
    else {
        void *page;
        STACK_GROW_DIR_DETECTION;

        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
        if (ptr == MAP_FAILED) {
            rb_raise(rb_eFiberError, "can't alloc machine stack to fiber");
        }

        /* guard page setup: place the inaccessible page at the end the
         * stack grows toward, so overflow faults instead of corrupting */
        page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
        if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
            rb_raise(rb_eFiberError, "mprotect failed");
        }
    }

    return ptr;
}
00615 #endif
00616 
/* Create the native machine context for a fiber: a Win32 fiber handle via
 * CreateFiberEx (with one GC retry on failure), or a POSIX ucontext set up
 * over a freshly allocated machine stack. */
static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
    rb_thread_t *sth = &fib->cont.saved_thread;

#ifdef _WIN32
    fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
    if (!fib->fib_handle) {
        /* try to release unnecessary fibers & retry to create */
        rb_gc();
        fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
        if (!fib->fib_handle) {
            rb_raise(rb_eFiberError, "can't create fiber");
        }
    }
    sth->machine_stack_maxsize = size;
#else /* not WIN32 */
    ucontext_t *context = &fib->context;
    char *ptr;
    STACK_GROW_DIR_DETECTION;

    getcontext(context);
    ptr = fiber_machine_stack_alloc(size);
    context->uc_link = NULL;
    context->uc_stack.ss_sp = ptr;
    context->uc_stack.ss_size = size;
    makecontext(context, rb_fiber_start, 0);
    /* stack "start" is the end the stack grows away from */
    sth->machine_stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
    /* one page is reserved for the guard page */
    sth->machine_stack_maxsize = size - RB_PAGE_SIZE;
#endif
#ifdef __ia64
    sth->machine_register_stack_maxsize = sth->machine_stack_maxsize;
#endif
}
00651 
00652 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
00653 
00654 static void
00655 fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
00656 {
00657     rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;
00658 
00659     if (newfib->status != RUNNING) {
00660         fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size);
00661     }
00662 
00663     /* restore thread context */
00664     cont_restore_thread(&newfib->cont);
00665     th->machine_stack_maxsize = sth->machine_stack_maxsize;
00666     if (sth->machine_stack_end && (newfib != oldfib)) {
00667         rb_bug("fiber_setcontext: sth->machine_stack_end has non zero value");
00668     }
00669 
00670     /* save  oldfib's machine stack */
00671     if (oldfib->status != TERMINATED) {
00672         STACK_GROW_DIR_DETECTION;
00673         SET_MACHINE_STACK_END(&th->machine_stack_end);
00674         if (STACK_DIR_UPPER(0, 1)) {
00675             oldfib->cont.machine_stack_size = th->machine_stack_start - th->machine_stack_end;
00676             oldfib->cont.machine_stack = th->machine_stack_end;
00677         }
00678         else {
00679             oldfib->cont.machine_stack_size = th->machine_stack_end - th->machine_stack_start;
00680             oldfib->cont.machine_stack = th->machine_stack_start;
00681         }
00682     }
00683     /* exchange machine_stack_start between oldfib and newfib */
00684     oldfib->cont.saved_thread.machine_stack_start = th->machine_stack_start;
00685     th->machine_stack_start = sth->machine_stack_start;
00686     /* oldfib->machine_stack_end should be NULL */
00687     oldfib->cont.saved_thread.machine_stack_end = 0;
00688 #ifndef _WIN32
00689     if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
00690         rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
00691     }
00692 #endif
00693 
00694     /* swap machine context */
00695 #ifdef _WIN32
00696     SwitchToFiber(newfib->fib_handle);
00697 #else
00698     swapcontext(&oldfib->context, &newfib->context);
00699 #endif
00700 }
00701 #endif
00702 
NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

/* Second phase of continuation restore: write the saved machine stack back
 * over the live one and longjmp to the capture point.  Only safe once the
 * current frame lies outside the region being overwritten — cont_restore_0
 * guarantees that before calling here. */
static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

    /* restore machine stack */
#ifdef _M_AMD64
    {
        /* workaround for x64 SEH: refresh the saved jmpbuf's SEH frame
         * pointer from a fresh setjmp of the current frame */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine_stack_src, cont->machine_stack,
                VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
00735 
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

#ifdef __ia64
/* ia64 only: grow the register backing store (RSE) by recursing until the
 * current backing-store pointer is past the region that will be restored,
 * so restoring cannot clobber live register frames.  The C()/E() macros
 * declare and touch banks of volatile ints to pressure register use. */
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{/* the above lines make cc-mode.el confused so much */}
#endif
/* always 0; the dead branch below exists only to force register usage */
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif
00772 
/* First phase of continuation restore: make sure the current C stack frame
 * lies outside the region that cont_restore_1() is about to overwrite, by
 * alloca'ing past it (or recursing, where alloca is unavailable), then
 * jump to the second phase.  Handles both stack growth directions. */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine_stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                /* move the frame below the region in one alloca step */
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                space[0] = *sp;  /* touch so the alloca is not optimized out */
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[0] = *sp;  /* touch so the alloca is not optimized out */
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
#ifdef __ia64
/* ia64: also extend the register backing store before restoring */
#define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
#endif
00824 
00825 /*
00826  *  Document-class: Continuation
00827  *
00828  *  Continuation objects are generated by Kernel#callcc,
00829  *  after having +require+d <i>continuation</i>. They hold
00830  *  a return address and execution context, allowing a nonlocal return
00831  *  to the end of the <code>callcc</code> block from anywhere within a
00832  *  program. Continuations are somewhat analogous to a structured
00833  *  version of C's <code>setjmp/longjmp</code> (although they contain
00834  *  more state, so you might consider them closer to threads).
00835  *
00836  *  For instance:
00837  *
00838  *     require "continuation"
00839  *     arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
00840  *     callcc{|cc| $cc = cc}
00841  *     puts(message = arr.shift)
00842  *     $cc.call unless message =~ /Max/
00843  *
00844  *  <em>produces:</em>
00845  *
00846  *     Freddie
00847  *     Herbie
00848  *     Ron
00849  *     Max
00850  *
00851  *  This (somewhat contrived) example allows the inner loop to abandon
00852  *  processing early:
00853  *
00854  *     require "continuation"
00855  *     callcc {|cont|
00856  *       for i in 0..4
00857  *         print "\n#{i}: "
00858  *         for j in i*5...(i+1)*5
00859  *           cont.call() if j == 17
00860  *           printf "%3d", j
00861  *         end
00862  *       end
00863  *     }
00864  *     puts
00865  *
00866  *  <em>produces:</em>
00867  *
00868  *     0:   0  1  2  3  4
00869  *     1:   5  6  7  8  9
00870  *     2:  10 11 12 13 14
00871  *     3:  15 16
00872  */
00873 
00874 /*
00875  *  call-seq:
00876  *     callcc {|cont| block }   ->  obj
00877  *
00878  *  Generates a Continuation object, which it passes to
00879  *  the associated block. You need to <code>require
00880  *  'continuation'</code> before using this method. Performing a
00881  *  <em>cont</em><code>.call</code> will cause the #callcc
00882  *  to return (as will falling through the end of the block). The
00883  *  value returned by the #callcc is the value of the
00884  *  block, or the value passed to <em>cont</em><code>.call</code>. See
00885  *  class Continuation for more details. Also see
00886  *  Kernel#throw for an alternative mechanism for
00887  *  unwinding a call stack.
00888  */
00889 
00890 static VALUE
00891 rb_callcc(VALUE self)
00892 {
00893     volatile int called;
00894     volatile VALUE val = cont_capture(&called);
00895 
00896     if (called) {
00897         return val;
00898     }
00899     else {
00900         return rb_yield(val);
00901     }
00902 }
00903 
00904 static VALUE
00905 make_passing_arg(int argc, VALUE *argv)
00906 {
00907     switch (argc) {
00908       case 0:
00909         return Qnil;
00910       case 1:
00911         return argv[0];
00912       default:
00913         return rb_ary_new4(argc, argv);
00914     }
00915 }
00916 
00917 /*
00918  *  call-seq:
00919  *     cont.call(args, ...)
00920  *     cont[args, ...]
00921  *
00922  *  Invokes the continuation. The program continues from the end of the
00923  *  <code>callcc</code> block. If no arguments are given, the original
00924  *  <code>callcc</code> returns <code>nil</code>. If one argument is
00925  *  given, <code>callcc</code> returns it. Otherwise, an array
00926  *  containing <i>args</i> is returned.
00927  *
00928  *     callcc {|cont|  cont.call }           #=> nil
00929  *     callcc {|cont|  cont.call 1 }         #=> 1
00930  *     callcc {|cont|  cont.call 1, 2, 3 }   #=> [1, 2, 3]
00931  */
00932 
00933 static VALUE
00934 rb_cont_call(int argc, VALUE *argv, VALUE contval)
00935 {
00936     rb_context_t *cont;
00937     rb_thread_t *th = GET_THREAD();
00938     GetContPtr(contval, cont);
00939 
00940     if (cont->saved_thread.self != th->self) {
00941         rb_raise(rb_eRuntimeError, "continuation called across threads");
00942     }
00943     if (cont->saved_thread.protect_tag != th->protect_tag) {
00944         rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
00945     }
00946     if (cont->saved_thread.fiber) {
00947         rb_fiber_t *fcont;
00948         GetFiberPtr(cont->saved_thread.fiber, fcont);
00949 
00950         if (th->fiber != cont->saved_thread.fiber) {
00951             rb_raise(rb_eRuntimeError, "continuation called across fiber");
00952         }
00953     }
00954 
00955     cont->argc = argc;
00956     cont->value = make_passing_arg(argc, argv);
00957 
00958     /* restore `tracing' context. see [Feature #4347] */
00959     th->trace_arg = cont->saved_thread.trace_arg;
00960 
00961     cont_restore_0(cont, &contval);
00962     return Qnil; /* unreachable */
00963 }
00964 
00965 /*********/
00966 /* fiber */
00967 /*********/
00968 
00969 /*
00970  *  Document-class: Fiber
00971  *
00972  *  Fibers are primitives for implementing light weight cooperative
00973  *  concurrency in Ruby. Basically they are a means of creating code blocks
00974  *  that can be paused and resumed, much like threads. The main difference
00975  *  is that they are never preempted and that the scheduling must be done by
00976  *  the programmer and not the VM.
00977  *
00978  *  As opposed to other stackless light weight concurrency models, each fiber
00979  *  comes with a small 4KB stack. This enables the fiber to be paused from deeply
00980  *  nested function calls within the fiber block.
00981  *
00982  *  When a fiber is created it will not run automatically. Rather it must
00983  *  be explicitly asked to run using the <code>Fiber#resume</code> method.
00984  *  The code running inside the fiber can give up control by calling
00985  *  <code>Fiber.yield</code> in which case it yields control back to the caller
00986  *  (the caller of the <code>Fiber#resume</code>).
00987  *
00988  *  Upon yielding or termination the Fiber returns the value of the last
00989  *  executed expression
00990  *
00991  *  For instance:
00992  *
00993  *    fiber = Fiber.new do
00994  *      Fiber.yield 1
00995  *      2
00996  *    end
00997  *
00998  *    puts fiber.resume
00999  *    puts fiber.resume
01000  *    puts fiber.resume
01001  *
01002  *  <em>produces</em>
01003  *
01004  *    1
01005  *    2
01006  *    FiberError: dead fiber called
01007  *
01008  *  The <code>Fiber#resume</code> method accepts an arbitrary number of
01009  *  parameters, if it is the first call to <code>resume</code> then they
01010  *  will be passed as block arguments. Otherwise they will be the return
01011  *  value of the call to <code>Fiber.yield</code>
01012  *
01013  *  Example:
01014  *
01015  *    fiber = Fiber.new do |first|
01016  *      second = Fiber.yield first + 2
01017  *    end
01018  *
01019  *    puts fiber.resume 10
01020  *    puts fiber.resume 14
01021  *    puts fiber.resume 18
01022  *
01023  *  <em>produces</em>
01024  *
01025  *    12
01026  *    14
01027  *    FiberError: dead fiber called
01028  *
01029  */
01030 
static const rb_data_type_t fiber_data_type = {
    "fiber",
    /* GC callbacks (mark / free / memsize) defined earlier in this file */
    {fiber_mark, fiber_free, fiber_memsize,},
};
01035 
01036 static VALUE
01037 fiber_alloc(VALUE klass)
01038 {
01039     return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
01040 }
01041 
01042 static rb_fiber_t*
01043 fiber_t_alloc(VALUE fibval)
01044 {
01045     rb_fiber_t *fib;
01046     rb_thread_t *th = GET_THREAD();
01047 
01048     if (DATA_PTR(fibval) != 0) {
01049         rb_raise(rb_eRuntimeError, "cannot initialize twice");
01050     }
01051 
01052     THREAD_MUST_BE_RUNNING(th);
01053     fib = ALLOC(rb_fiber_t);
01054     memset(fib, 0, sizeof(rb_fiber_t));
01055     fib->cont.self = fibval;
01056     fib->cont.type = FIBER_CONTEXT;
01057     cont_init(&fib->cont, th);
01058     fib->prev = Qnil;
01059     fib->status = CREATED;
01060 
01061     DATA_PTR(fibval) = fib;
01062 
01063     return fib;
01064 }
01065 
/*
 * Second stage of fiber creation: give the fiber its own VM stack,
 * build an initial sentinel control frame on it, and remember `proc`
 * as the code to run on the first resume.
 */
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize cont */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    th->stack_size = th->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
    th->stack = ALLOC_N(VALUE, th->stack_size);

    /* carve the bottom-most control frame out of the top of the new stack */
    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
#if VM_DEBUG_BP_CHECK
    th->cfp->bp_check = 0;
#endif
    th->cfp->ep = th->stack;
    *th->cfp->ep = VM_ENVVAL_BLOCK_PTR(0);
    th->cfp->self = Qnil;
    th->cfp->klass = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable(); /* fibers get fresh fiber-local storage */

    th->first_proc = proc;

#if !FIBER_USE_NATIVE
    /* setjmp-based fibers start from a copy of the thread's root jmpbuf */
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}
01111 
01112 /* :nodoc: */
01113 static VALUE
01114 rb_fiber_init(VALUE fibval)
01115 {
01116     return fiber_init(fibval, rb_block_proc());
01117 }
01118 
01119 VALUE
01120 rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
01121 {
01122     return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
01123 }
01124 
01125 static VALUE
01126 return_fiber(void)
01127 {
01128     rb_fiber_t *fib;
01129     VALUE curr = rb_fiber_current();
01130     VALUE prev;
01131     GetFiberPtr(curr, fib);
01132 
01133     prev = fib->prev;
01134     if (NIL_P(prev)) {
01135         const VALUE root_fiber = GET_THREAD()->root_fiber;
01136 
01137         if (root_fiber == curr) {
01138             rb_raise(rb_eFiberError, "can't yield from root fiber");
01139         }
01140         return root_fiber;
01141     }
01142     else {
01143         fib->prev = Qnil;
01144         return prev;
01145     }
01146 }
01147 
01148 VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
01149 
/*
 * Mark the current fiber as finished and switch control back to its
 * return target, handing over the fiber's final value.  With native
 * fibers (non-Windows) the machine stack we are still running on cannot
 * be unmapped here; it is parked in `terminated_machine_stack` and
 * recycled or released later by fiber_store().
 */
static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
#if FIBER_USE_NATIVE && !defined(_WIN32)
    /* Ruby must not switch to other thread until storing terminated_machine_stack */
    terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
    terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine_stack = NULL;
    fib->cont.machine_stack_size = 0;
#endif
    rb_fiber_transfer(return_fiber(), 1, &value);
}
01165 
/*
 * Entry point of every non-root fiber: run the stored Proc with the
 * arguments passed to the first resume, then terminate the fiber.
 * Exceptions or jumps escaping the Proc are queued on the thread as
 * pending interrupts so they surface in the resuming context.  This
 * function never returns.
 */
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        int argc;
        VALUE *argv, args;
        GetProcPtr(cont->saved_thread.first_proc, proc);
        args = cont->value;
        /* 0 or 1 resume arguments are passed as-is; more arrive as an Array */
        argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
        cont->value = Qnil;
        th->errinfo = Qnil;
        th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
        th->root_svar = Qnil;

        fib->status = RUNNING;
        cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
        /* the fiber body left via raise/throw/break: forward it to the
         * context that receives control next */
        if (state == TAG_RAISE || state == TAG_FATAL) {
            rb_threadptr_pending_interrupt_enque(th, th->errinfo);
        }
        else {
            VALUE err = rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
            if (!NIL_P(err))
                rb_threadptr_pending_interrupt_enque(th, err);
        }
        RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}
01210 
/*
 * Build the rb_fiber_t describing the thread's original context (the
 * root fiber).  It needs no separate VM stack, starts in RUNNING state
 * and forms a one-element circular fiber list.  On Windows the current
 * OS thread itself is converted into a native fiber here.
 */
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;
    /* no need to allocate vm stack */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    fib->fib_handle = ConvertThreadToFiber(0);
#endif
#endif
    fib->status = RUNNING;
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}
01228 
01229 VALUE
01230 rb_fiber_current(void)
01231 {
01232     rb_thread_t *th = GET_THREAD();
01233     if (th->fiber == 0) {
01234         /* save root */
01235         rb_fiber_t *fib = root_fiber_alloc(th);
01236         th->root_fiber = th->fiber = fib->cont.self;
01237     }
01238     return th->fiber;
01239 }
01240 
/*
 * Save the current fiber's context and arrange the switch to
 * `next_fib`.  Returns only when this fiber regains control, yielding
 * the value passed by whoever switched back.  With native fibers the
 * switch happens here (fiber_setcontext) and any machine stack parked
 * by a terminated fiber is cached or unmapped; with setjmp-based
 * fibers the first (saving) return is signalled by Qundef and the
 * caller performs the actual jump.
 */
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
        GetFiberPtr(th->fiber, fib);
        cont_save_thread(&fib->cont, th);
    }
    else {
        /* create current fiber */
        fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }

#if !FIBER_USE_NATIVE
    cont_save_machine_stack(th, &fib->cont);
#endif

    if (FIBER_USE_NATIVE || ruby_setjmp(fib->cont.jmpbuf)) {
#if FIBER_USE_NATIVE
        fiber_setcontext(next_fib, fib);
#ifndef _WIN32
        if (terminated_machine_stack.ptr) {
            /* a fiber terminated while we were away: recycle its
             * machine stack into the cache, or unmap it if full */
            if (machine_stack_cache_index < MAX_MAHINE_STACK_CACHE) {
                machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
                machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
                machine_stack_cache_index++;
            }
            else {
                if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
                    munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
                }
                else {
                    rb_bug("terminated fiber resumed");
                }
            }
            terminated_machine_stack.ptr = NULL;
            terminated_machine_stack.size = 0;
        }
#endif
#endif
        /* restored */
        GetFiberPtr(th->fiber, fib);
        /* argc == -1 marks an exception handed over by fiber_switch() */
        if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
        return fib->cont.value;
    }
#if !FIBER_USE_NATIVE
    else {
        return Qundef;
    }
#endif
}
01295 
/*
 * Core of every fiber context switch (resume, transfer and yield).
 * Validates the destination fiber, handles switching into a dead fiber
 * (raising FiberError in the most appropriate live context), records
 * the resumer when `is_resume` is set, then saves the current context
 * and switches.  Returns the value passed back when this fiber regains
 * control.
 */
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (th->fiber == fibval) {
        /* ignore fiber context switch
         * because destination fiber is same as current fiber
         */
        return make_passing_arg(argc, argv);
    }

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");
        if (th->fiber != fibval) {
            /* raise in the current fiber if it is still alive, otherwise
             * redirect the error to the root fiber */
            GetFiberPtr(th->fiber, fib);
            if (fib->status != TERMINATED) rb_exc_raise(value);
            fibval = th->root_fiber;
        }
        else {
            fibval = fib->prev;
            if (NIL_P(fibval)) fibval = th->root_fiber;
        }
        GetFiberPtr(fibval, fib);
        cont = &fib->cont;
        cont->argc = -1;     /* argc == -1 tells fiber_store() to raise `value' */
        cont->value = value;
#if FIBER_USE_NATIVE
        {
            VALUE oldfibval;
            rb_fiber_t *oldfib;
            oldfibval = rb_fiber_current();
            GetFiberPtr(oldfibval, oldfib);
            fiber_setcontext(fib, oldfib);
        }
#else
        cont_restore_0(cont, &value);
#endif
    }

    if (is_resume) {
        /* remember where Fiber.yield should come back to */
        fib->prev = rb_fiber_current();
    }
    else {
        /* restore `tracing' context. see [Feature #4347] */
        th->trace_arg = cont->saved_thread.trace_arg;
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    value = fiber_store(fib);
#if !FIBER_USE_NATIVE
    /* Qundef signals the saving return of setjmp: perform the jump now */
    if (value == Qundef) {
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }
#endif
    RUBY_VM_CHECK_INTS(th);

    return value;
}
01370 
01371 VALUE
01372 rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
01373 {
01374     return fiber_switch(fib, argc, argv, 0);
01375 }
01376 
01377 VALUE
01378 rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
01379 {
01380     rb_fiber_t *fib;
01381     GetFiberPtr(fibval, fib);
01382 
01383     if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
01384         rb_raise(rb_eFiberError, "double resume");
01385     }
01386     if (fib->transfered != 0) {
01387         rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
01388     }
01389 
01390     return fiber_switch(fibval, argc, argv, 1);
01391 }
01392 
01393 VALUE
01394 rb_fiber_yield(int argc, VALUE *argv)
01395 {
01396     return rb_fiber_transfer(return_fiber(), argc, argv);
01397 }
01398 
01399 void
01400 rb_fiber_reset_root_local_storage(VALUE thval)
01401 {
01402     rb_thread_t *th;
01403     rb_fiber_t  *fib;
01404 
01405     GetThreadPtr(thval, th);
01406     if (th->root_fiber && th->root_fiber != th->fiber) {
01407         GetFiberPtr(th->root_fiber, fib);
01408         th->local_storage = fib->cont.saved_thread.local_storage;
01409     }
01410 }
01411 
01412 /*
01413  *  call-seq:
01414  *     fiber.alive? -> true or false
01415  *
01416  *  Returns true if the fiber can still be resumed (or transferred
01417  *  to). After finishing execution of the fiber block this method will
01418  *  always return false. You need to <code>require 'fiber'</code>
01419  *  before using this method.
01420  */
01421 VALUE
01422 rb_fiber_alive_p(VALUE fibval)
01423 {
01424     rb_fiber_t *fib;
01425     GetFiberPtr(fibval, fib);
01426     return fib->status != TERMINATED ? Qtrue : Qfalse;
01427 }
01428 
01429 /*
01430  *  call-seq:
01431  *     fiber.resume(args, ...) -> obj
01432  *
01433  *  Resumes the fiber from the point at which the last <code>Fiber.yield</code>
01434  *  was called, or starts running it if it is the first call to
01435  *  <code>resume</code>. Arguments passed to resume will be the value of
01436  *  the <code>Fiber.yield</code> expression or will be passed as block
01437  *  parameters to the fiber's block if this is the first <code>resume</code>.
01438  *
01439  *  Alternatively, when resume is called it evaluates to the arguments passed
01440  *  to the next <code>Fiber.yield</code> statement inside the fiber's block
01441  *  or to the block value if it runs to completion without any
01442  *  <code>Fiber.yield</code>
01443  */
01444 static VALUE
01445 rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
01446 {
01447     return rb_fiber_resume(fib, argc, argv);
01448 }
01449 
01450 /*
01451  *  call-seq:
01452  *     fiber.transfer(args, ...) -> obj
01453  *
01454  *  Transfer control to another fiber, resuming it from where it last
01455  *  stopped or starting it if it was not resumed before. The calling
01456  *  fiber will be suspended much like in a call to
01457  *  <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
01458  *  before using this method.
01459  *
01460  *  The fiber which receives the transfer call treats it much like
01461  *  a resume call. Arguments passed to transfer are treated like those
01462  *  passed to resume.
01463  *
01464  *  You cannot resume a fiber that transferred control to another one.
01465  *  This will cause a double resume error. You need to transfer control
01466  *  back to this fiber before it can yield and resume.
01467  *
01468  *  Example:
01469  *
01470  *    fiber1 = Fiber.new do
01471  *      puts "In Fiber 1"
01472  *      Fiber.yield
01473  *    end
01474  *
01475  *    fiber2 = Fiber.new do
01476  *      puts "In Fiber 2"
01477  *      fiber1.transfer
01478  *      puts "Never see this message"
01479  *    end
01480  *
01481  *    fiber3 = Fiber.new do
01482  *      puts "In Fiber 3"
01483  *    end
01484  *
01485  *    fiber2.resume
01486  *    fiber3.resume
01487  *
01488  *    <em>produces</em>
01489  *
01490  *    In Fiber 2
01491  *    In Fiber 1
01492  *    In Fiber 3
01493  *
01494  */
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    /* once transferred, the fiber may no longer be #resume'd
     * (checked in rb_fiber_resume) */
    fib->transfered = 1;
    return rb_fiber_transfer(fibval, argc, argv);
}
01503 
01504 /*
01505  *  call-seq:
01506  *     Fiber.yield(args, ...) -> obj
01507  *
01508  *  Yields control back to the context that resumed the fiber, passing
01509  *  along any arguments that were passed to it. The fiber will resume
01510  *  processing at this point when <code>resume</code> is called next.
01511  *  Any arguments passed to the next <code>resume</code> will be the
01512  *  value that this <code>Fiber.yield</code> expression evaluates to.
01513  */
01514 static VALUE
01515 rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
01516 {
01517     return rb_fiber_yield(argc, argv);
01518 }
01519 
01520 /*
01521  *  call-seq:
01522  *     Fiber.current() -> fiber
01523  *
01524  *  Returns the current fiber. You need to <code>require 'fiber'</code>
01525  *  before using this method. If you are not running in the context of
01526  *  a fiber this method will return the root fiber.
01527  */
01528 static VALUE
01529 rb_fiber_s_current(VALUE klass)
01530 {
01531     return rb_fiber_current();
01532 }
01533 
01534 
01535 
01536 /*
01537  *  Document-class: FiberError
01538  *
01539  *  Raised when an invalid operation is attempted on a Fiber, in
01540  *  particular when attempting to call/resume a dead fiber,
01541  *  attempting to yield from the root fiber, or calling a fiber across
01542  *  threads.
01543  *
01544  *     fiber = Fiber.new{}
01545  *     fiber.resume #=> nil
01546  *     fiber.resume #=> FiberError: dead fiber called
01547  */
01548 
void
Init_Cont(void)
{
    /* One-time VM setup: register Fiber/FiberError and the core fiber
     * methods.  With native fibers, also record the machine page size
     * (used when sizing fiber machine stacks) and the current machine
     * stack end. */
#if FIBER_USE_NATIVE
    rb_thread_t *th = GET_THREAD();

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else /* not WIN32 */
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->machine_stack_end);
#endif

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}
01572 
01573 #if defined __GNUC__ && __GNUC__ >= 4
01574 #pragma GCC visibility push(default)
01575 #endif
01576 
/*
 * Called when ext/continuation is required: defines the Continuation
 * class and the Kernel#callcc entry point.  Continuations cannot be
 * allocated or constructed directly; they are only produced by callcc.
 */
void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}
01587 
/*
 * Called when ext/fiber is required: adds the coroutine-style API
 * (Fiber#transfer, Fiber#alive?, Fiber.current) on top of the core
 * Fiber class registered by Init_Cont().
 */
void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}
01595 
01596 #if defined __GNUC__ && __GNUC__ >= 4
01597 #pragma GCC visibility pop
01598 #endif
01599