Ruby 2.0.0p481 (2014-05-08 revision 45883)
thread.c
Go to the documentation of this file.
00001 /**********************************************************************
00002 
00003   thread.c -
00004 
00005   $Author: nagachika $
00006 
00007   Copyright (C) 2004-2007 Koichi Sasada
00008 
00009 **********************************************************************/
00010 
00011 /*
00012   YARV Thread Design
00013 
00014   model 1: Userlevel Thread
00015     Same as traditional ruby thread.
00016 
00017   model 2: Native Thread with Global VM lock
00018     Using pthread (or Windows thread) and Ruby threads run concurrent.
00019 
00020   model 3: Native Thread with fine grain lock
00021     Using pthread and Ruby threads run concurrent or parallel.
00022 
00023 ------------------------------------------------------------------------
00024 
  model 2:
    Only a thread which holds the mutex (GVL: Global VM Lock or Giant VM Lock)
    can run.  On thread scheduling, the running thread releases the GVL.  If
    the running thread attempts a blocking operation, it must release the GVL
    so that another thread can continue running.  After the blocking
    operation, the thread must check for interrupts (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.
00036 ------------------------------------------------------------------------
00037 
  model 3:
    All threads run concurrently or in parallel, and exclusive access
    control is needed to access shared objects.  For example, a fine-grained
    lock must be taken every time a String or Array object is accessed.
00042  */
00043 
00044 
/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later with _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong. Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024). And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort.
 * Therefore we need to disable _FORTIFY_SOURCE until glibc fixes it.
 */
00054 #undef _FORTIFY_SOURCE
00055 #undef __USE_FORTIFY_LEVEL
00056 #define __USE_FORTIFY_LEVEL 0
00057 
00058 /* for model 2 */
00059 
00060 #include "eval_intern.h"
00061 #include "gc.h"
00062 #include "internal.h"
00063 #include "ruby/io.h"
00064 #include "ruby/thread.h"
00065 
00066 #ifndef USE_NATIVE_THREAD_PRIORITY
00067 #define USE_NATIVE_THREAD_PRIORITY 0
00068 #define RUBY_THREAD_PRIORITY_MAX 3
00069 #define RUBY_THREAD_PRIORITY_MIN -3
00070 #endif
00071 
00072 #ifndef THREAD_DEBUG
00073 #define THREAD_DEBUG 0
00074 #endif
00075 
/* Maximum/minimum representable time_t, computed portably for both signed
 * and unsigned time_t.  unsigned_time_t is an unsigned type with the same
 * width as time_t, defined elsewhere in the build. */
#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
#define TIMET_MIN (~(time_t)0 <= 0 ? (time_t)(((unsigned_time_t)1) << (sizeof(time_t) * CHAR_BIT - 1)) : (time_t)0)
00078 
00079 VALUE rb_cMutex;
00080 VALUE rb_cThreadShield;
00081 
00082 static VALUE sym_immediate;
00083 static VALUE sym_on_blocking;
00084 static VALUE sym_never;
00085 
00086 static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check);
00087 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
00088 static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
00089 static double timeofday(void);
00090 static int rb_threadptr_dead(rb_thread_t *th);
00091 static void rb_check_deadlock(rb_vm_t *vm);
00092 static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th);
00093 
00094 #define eKillSignal INT2FIX(0)
00095 #define eTerminateSignal INT2FIX(1)
00096 static volatile int system_working = 1;
00097 
00098 #define closed_stream_error GET_VM()->special_exceptions[ruby_error_closed_stream]
00099 
/* Delete +key+ from +table+, discarding any stored value.  Thin wrapper so
 * callers can pass the key by value instead of by address. */
inline static void
st_delete_wrap(st_table *table, st_data_t key)
{
    st_delete(table, &key, 0);
}
00105 
00106 /********************************************************************************/
00107 
00108 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
00109 
/* State saved across a blocking region (see BLOCKING_REGION): the thread's
 * status before entering the region and the previously registered unblock
 * callback, both restored by blocking_region_end(). */
struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
    struct rb_unblock_callback oldubf;
};
00114 
00115 static int set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
00116                                 struct rb_unblock_callback *old, int fail_if_interrupted);
00117 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
00118 
00119 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
00120                                         rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
00121 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
00122 
#ifdef __ia64
/* On IA-64 the register backing store lives apart from the C stack; record
 * its current top so the conservative GC can scan it. */
#define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)          \
    do{(th)->machine_register_stack_end = rb_ia64_bsp();}while(0)
#else
#define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
#endif
/* Snapshot the machine context into +th+ (registers via setjmp, current
 * stack end) so the conservative GC can scan this thread while it is not
 * actively running under the GVL. */
#define RB_GC_SAVE_MACHINE_CONTEXT(th)                          \
    do {                                                        \
        FLUSH_REGISTER_WINDOWS;                                 \
        RB_GC_SAVE_MACHINE_REGISTER_STACK(th);                  \
        setjmp((th)->machine_regs);                             \
        SET_MACHINE_STACK_END(&(th)->machine_stack_end);        \
    } while (0)

/* Bracket a code section that runs without the GVL.  The machine context is
 * saved first so the GC can still scan this thread while it is unlocked. */
#define GVL_UNLOCK_BEGIN() do { \
  rb_thread_t *_th_stored = GET_THREAD(); \
  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
  gvl_release(_th_stored->vm);

#define GVL_UNLOCK_END() \
  gvl_acquire(_th_stored->vm, _th_stored); \
  rb_thread_set_current(_th_stored); \
} while(0)

/* Evaluate to (expr) only when it is a compile-time constant; otherwise fall
 * back to (notconst).  Lets BLOCKING_REGION fold the fail_if_interrupted
 * test away when the flag is a literal. */
#ifdef __GNUC__
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#else
#define only_if_constant(expr, notconst) notconst
#endif
/* Run +exec+ outside the GVL with +ubf+/+ubfarg+ registered as the
 * unblocking function.  When fail_if_interrupted is set and an interrupt is
 * already pending, +exec+ is skipped entirely. */
#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
    rb_thread_t *__th = GET_THREAD(); \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        exec; \
        blocking_region_end(__th, &__region); \
    }; \
} while(0)
00162 
00163 #if THREAD_DEBUG
00164 #ifdef HAVE_VA_ARGS_MACRO
00165 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
00166 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
00167 #define POSITION_FORMAT "%s:%d:"
00168 #define POSITION_ARGS ,file, line
00169 #else
00170 void rb_thread_debug(const char *fmt, ...);
00171 #define thread_debug rb_thread_debug
00172 #define POSITION_FORMAT
00173 #define POSITION_ARGS
00174 #endif
00175 
00176 # if THREAD_DEBUG < 0
00177 static int rb_thread_debug_enabled;
00178 
00179 /*
00180  *  call-seq:
00181  *     Thread.DEBUG     -> num
00182  *
00183  *  Returns the thread debug level.  Available only if compiled with
00184  *  THREAD_DEBUG=-1.
00185  */
00186 
static VALUE
rb_thread_s_debug(void)
{
    /* Expose the current debug level as a Fixnum. */
    return INT2NUM(rb_thread_debug_enabled);
}
00192 
00193 /*
00194  *  call-seq:
00195  *     Thread.DEBUG = num
00196  *
00197  *  Sets the thread debug level.  Available only if compiled with
00198  *  THREAD_DEBUG=-1.
00199  */
00200 
00201 static VALUE
00202 rb_thread_s_debug_set(VALUE self, VALUE val)
00203 {
00204     rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
00205     return val;
00206 }
00207 # else
00208 # define rb_thread_debug_enabled THREAD_DEBUG
00209 # endif
00210 #else
00211 #define thread_debug if(0)printf
00212 #endif
00213 
00214 #ifndef __ia64
00215 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
00216 #endif
00217 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
00218                                         VALUE *register_stack_start));
00219 static void timer_thread_function(void *);
00220 
00221 #if   defined(_WIN32)
00222 #include "thread_win32.c"
00223 
00224 #define DEBUG_OUT() \
00225   WaitForSingleObject(&debug_mutex, INFINITE); \
00226   printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
00227   fflush(stdout); \
00228   ReleaseMutex(&debug_mutex);
00229 
00230 #elif defined(HAVE_PTHREAD_H)
00231 #include "thread_pthread.c"
00232 
00233 #define DEBUG_OUT() \
00234   pthread_mutex_lock(&debug_mutex); \
00235   printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
00236   fflush(stdout); \
00237   pthread_mutex_unlock(&debug_mutex);
00238 
00239 #else
00240 #error "unsupported thread type"
00241 #endif
00242 
00243 #if THREAD_DEBUG
/* Nonzero until the first rb_thread_debug() call initializes debug_mutex. */
static int debug_mutex_initialized = 1;
static rb_thread_lock_t debug_mutex;

/*
 * printf-style debug logger behind the thread_debug() macro.  Formats the
 * message into a local buffer and emits it under debug_mutex via
 * DEBUG_OUT().  With HAVE_VA_ARGS_MACRO the call site's file/line are
 * passed in as extra leading arguments.
 */
void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];

    if (!rb_thread_debug_enabled) return;

    /* Lazy one-shot mutex init.  NOTE(review): this check-then-init is not
     * itself synchronized; presumably the first call happens before
     * concurrent use -- confirm. */
    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
00270 #endif
00271 
/* Tear down the VM's GVL (releasing it first) and destroy the
 * thread-destruct lock.  Part of VM shutdown. */
void
rb_vm_gvl_destroy(rb_vm_t *vm)
{
    gvl_release(vm);
    gvl_destroy(vm);
    native_mutex_destroy(&vm->thread_destruct_lock);
}
00279 
/* Unlock a low-level thread lock.  Thin wrapper over the native layer. */
void
rb_thread_lock_unlock(rb_thread_lock_t *lock)
{
    native_mutex_unlock(lock);
}
00285 
/* Destroy a low-level thread lock.  Thin wrapper over the native layer. */
void
rb_thread_lock_destroy(rb_thread_lock_t *lock)
{
    native_mutex_destroy(lock);
}
00291 
/*
 * Install +func+/+arg+ as +th+'s unblocking function, saving the previous
 * callback into +old+ (when non-NULL).  The installation only happens while
 * no interrupt is pending: if one arrives between the pre-check and taking
 * interrupt_lock, the lock is dropped and we retry from check_ints so the
 * interrupt is never lost.
 *
 * Returns TRUE on success.  With fail_if_interrupted set, a pending
 * interrupt makes this return FALSE without touching the callback;
 * otherwise pending interrupts are consumed via RUBY_VM_CHECK_INTS (which
 * may itself raise or switch threads).
 */
static int
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                     struct rb_unblock_callback *old, int fail_if_interrupted)
{
  check_ints:
    if (fail_if_interrupted) {
        if (RUBY_VM_INTERRUPTED_ANY(th)) {
            return FALSE;
        }
    }
    else {
        RUBY_VM_CHECK_INTS(th);
    }

    native_mutex_lock(&th->interrupt_lock);
    if (RUBY_VM_INTERRUPTED_ANY(th)) {
        /* raced with an interrupt: back out and re-run the checks */
        native_mutex_unlock(&th->interrupt_lock);
        goto check_ints;
    }
    else {
        if (old) *old = th->unblock;
        th->unblock.func = func;
        th->unblock.arg = arg;
    }
    native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}
00320 
/* Restore an unblock callback previously saved by set_unblock_function(),
 * under interrupt_lock. */
static void
reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
{
    native_mutex_lock(&th->interrupt_lock);
    th->unblock = *old;
    native_mutex_unlock(&th->interrupt_lock);
}
00328 
00329 static void
00330 rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
00331 {
00332     native_mutex_lock(&th->interrupt_lock);
00333     if (trap)
00334         RUBY_VM_SET_TRAP_INTERRUPT(th);
00335     else
00336         RUBY_VM_SET_INTERRUPT(th);
00337     if (th->unblock.func) {
00338         (th->unblock.func)(th->unblock.arg);
00339     }
00340     else {
00341         /* none */
00342     }
00343     native_mutex_unlock(&th->interrupt_lock);
00344 }
00345 
/* Post an ordinary (non-trap) interrupt to +th+, waking it if blocking. */
void
rb_threadptr_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 0);
}
00351 
/* Post a trap (signal-handler) interrupt to +th+, waking it if blocking. */
void
rb_threadptr_trap_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 1);
}
00357 
00358 static int
00359 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
00360 {
00361     VALUE thval = key;
00362     rb_thread_t *th;
00363     GetThreadPtr(thval, th);
00364 
00365     if (th != main_thread) {
00366         thread_debug("terminate_i: %p\n", (void *)th);
00367         rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
00368         rb_threadptr_interrupt(th);
00369     }
00370     else {
00371         thread_debug("terminate_i: main thread (%p)\n", (void *)th);
00372     }
00373     return ST_CONTINUE;
00374 }
00375 
/* Internal representation of a Ruby Mutex object. */
typedef struct rb_mutex_struct
{
    rb_thread_lock_t lock;                /* native lock backing this Mutex */
    rb_thread_cond_t cond;                /* condition waiters block on */
    struct rb_thread_struct volatile *th; /* presumably the owning thread; see rb_mutex_unlock_th */
    int cond_waiting;
    struct rb_mutex_struct *next_mutex;   /* link in a thread's keeping_mutexes chain */
    int allow_trap;                       /* presumably permits use inside trap handlers -- confirm */
} rb_mutex_t;

static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th);
00390 
00391 void
00392 rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
00393 {
00394     const char *err;
00395     rb_mutex_t *mutex;
00396     rb_mutex_t *mutexes = th->keeping_mutexes;
00397 
00398     while (mutexes) {
00399         mutex = mutexes;
00400         /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
00401                 mutexes); */
00402         mutexes = mutex->next_mutex;
00403         err = rb_mutex_unlock_th(mutex, th);
00404         if (err) rb_bug("invalid keeping_mutexes: %s", err);
00405     }
00406 }
00407 
/*
 * Terminate every thread except the main thread.  Must be called on the
 * main thread (rb_bug otherwise).  Posts eTerminateSignal to every living
 * thread, then sleeps until this is the only thread left.  An exception
 * raised while waiting (state != 0) restarts the whole procedure.
 */
void
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;

    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)vm->main_thread, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

  retry:
    thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);

    while (!rb_thread_alone()) {
        int state;

        /* Sleep until woken (the last exiting thread interrupts us).
         * Interrupt processing may raise, which sends us back to retry. */
        TH_PUSH_TAG(th);
        if ((state = TH_EXEC_TAG()) == 0) {
            native_sleep(th, 0);
            RUBY_VM_CHECK_INTS_BLOCKING(th);
        }
        TH_POP_TAG();

        if (state) {
            goto retry;
        }
    }
}
00441 
/* Minimal cleanup also safe before exec(2): mark the thread killed and
 * clear its machine stack bounds so the GC no longer scans them. */
static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;
    th->machine_stack_start = th->machine_stack_end = 0;
#ifdef __ia64
    th->machine_register_stack_start = th->machine_register_stack_end = 0;
#endif
}
00452 
00453 static void
00454 thread_cleanup_func(void *th_ptr, int atfork)
00455 {
00456     rb_thread_t *th = th_ptr;
00457 
00458     th->locking_mutex = Qfalse;
00459     thread_cleanup_func_before_exec(th_ptr);
00460 
00461     /*
00462      * Unfortunately, we can't release native threading resource at fork
00463      * because libc may have unstable locking state therefore touching
00464      * a threading resource may cause a deadlock.
00465      */
00466     if (atfork)
00467         return;
00468 
00469     native_mutex_destroy(&th->interrupt_lock);
00470     native_thread_destroy(th);
00471 }
00472 
00473 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
00474 
/* Delegate stack initialization for +th+ to the native thread layer. */
void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}
00480 
/*
 * Body of every Ruby thread except the main thread (invoked from the native
 * thread entry point).  Under the GVL it runs either th->first_func (C API
 * threads) or th->first_proc (Thread.new block), storing the result in
 * th->value; on an exceptional exit it decides whether the error must be
 * propagated to the main thread.  It then performs all per-thread teardown:
 * removal from living_threads, join-list wakeups, mutex release, deadlock
 * check and native resource cleanup.  Always returns 0.
 */
static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    int state;
    VALUE args = th->first_args;
    rb_proc_t *proc;
    rb_thread_list_t *join_list;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;
# ifdef USE_SIGALTSTACK
    void rb_register_sigaltstack(rb_thread_t *th);

    rb_register_sigaltstack(th);
# endif

    if (th == th->vm->main_thread)
        rb_bug("thread_start_func_2 must not used for main thread");

    ruby_thread_set_native(th);

    /* record stack bounds for conservative GC scanning */
    th->machine_stack_start = stack_start;
#ifdef __ia64
    th->machine_register_stack_start = register_stack_start;
#endif
    thread_debug("thread start: %p\n", (void *)th);

    gvl_acquire(th->vm, th);
    {
        thread_debug("thread start (get lock): %p\n", (void *)th);
        rb_thread_set_current(th);

        TH_PUSH_TAG(th);
        if ((state = EXEC_TAG()) == 0) {
            SAVE_ROOT_JMPBUF(th, {
                if (!th->first_func) {
                    /* block-based thread: invoke the stored proc with args */
                    GetProcPtr(th->first_proc, proc);
                    th->errinfo = Qnil;
                    th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
                    th->root_svar = Qnil;
                    EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, Qundef);
                    th->value = rb_vm_invoke_proc(th, proc, (int)RARRAY_LEN(args), RARRAY_PTR(args), 0);
                    EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, Qundef);
                }
                else {
                    /* C API thread: call the registered C function */
                    th->value = (*th->first_func)((void *)args);
                }
            });
        }
        else {
            /* non-local exit: decide whether to propagate errinfo */
            errinfo = th->errinfo;
            if (state == TAG_FATAL) {
                /* fatal error within this thread, need to stop whole script */
            }
            else if (th->safe_level >= 4) {
                /* Ignore it. Main thread shouldn't be harmed from untrusted thread. */
                errinfo = Qnil;
            }
            else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
                /* exit on main_thread. */
            }
            else if (th->vm->thread_abort_on_exception ||
                     th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
            th->value = Qnil;
        }

        th->status = THREAD_KILLED;
        thread_debug("thread end: %p\n", (void *)th);

        main_th = th->vm->main_thread;
        if (RB_TYPE_P(errinfo, T_OBJECT)) {
            /* treat with normal error object */
            rb_threadptr_raise(main_th, 1, &errinfo);
        }
        TH_POP_TAG();

        /* locking_mutex must be Qfalse */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
                   (void *)th, th->locking_mutex);
        }

        /* delete self other than main thread from living_threads */
        st_delete_wrap(th->vm->living_threads, th->self);
        if (rb_thread_alone()) {
            /* I'm last thread. wake up main thread from rb_thread_terminate_all */
            rb_threadptr_interrupt(main_th);
        }

        /* wake up joining threads */
        join_list = th->join_list;
        while (join_list) {
            rb_threadptr_interrupt(join_list->th);
            switch (join_list->th->status) {
              case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
                join_list->th->status = THREAD_RUNNABLE;
              default: break;
            }
            join_list = join_list->next;
        }

        rb_threadptr_unlock_all_locking_mutexes(th);
        rb_check_deadlock(th->vm);

        if (!th->root_fiber) {
            rb_thread_recycle_stack_release(th->stack);
            th->stack = 0;
        }
    }
    native_mutex_lock(&th->vm->thread_destruct_lock);
    /* make sure vm->running_thread never point me after this point.*/
    th->vm->running_thread = NULL;
    native_mutex_unlock(&th->vm->thread_destruct_lock);
    thread_cleanup_func(th, FALSE);
    gvl_release(th->vm);

    return 0;
}
00603 
/*
 * Shared backend of Thread.new / Thread.start / rb_thread_create: populate
 * +th+'s initial state from the current thread and start the native thread.
 * +fn+ is a C function to run, or NULL to run the attached block.  Raises
 * ThreadError if the current thread group is frozen or the native thread
 * cannot be created.  Returns +thval+.
 */
static VALUE
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
{
    rb_thread_t *th, *current_th = GET_THREAD();
    int err;

    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_func = fn;
    th->first_proc = fn ? Qfalse : rb_block_proc();
    th->first_args = args; /* GC: shouldn't put before above line */

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_tmp_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    /* hide the internal array from Ruby code */
    RBASIC(th->pending_interrupt_mask_stack)->klass = 0;

    th->interrupt_mask = 0;

    native_mutex_initialize(&th->interrupt_lock);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_raise(rb_eThreadError, "can't create Thread (%d)", err);
    }
    st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
    return thval;
}
00642 
00643 /*
00644  * call-seq:
00645  *  Thread.new { ... }                  -> thread
00646  *  Thread.new(*args, &proc)            -> thread
00647  *  Thread.new(*args) { |args| ... }    -> thread
00648  *
00649  *  Creates a new thread executing the given block.
00650  *
00651  *  Any +args+ given to ::new will be passed to the block:
00652  *
00653  *      arr = []
00654  *      a, b, c = 1, 2, 3
00655  *      Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
00656  *      arr #=> [1, 2, 3]
00657  *
00658  *  A ThreadError exception is raised if ::new is called without a block.
00659  *
00660  *  If you're going to subclass Thread, be sure to call super in your
00661  *  +initialize+ method, otherwise a ThreadError will be raised.
00662  */
00663 static VALUE
00664 thread_s_new(int argc, VALUE *argv, VALUE klass)
00665 {
00666     rb_thread_t *th;
00667     VALUE thread = rb_thread_alloc(klass);
00668 
00669     if (GET_VM()->main_thread->status == THREAD_KILLED)
00670         rb_raise(rb_eThreadError, "can't alloc thread");
00671 
00672     rb_obj_call_init(thread, argc, argv);
00673     GetThreadPtr(thread, th);
00674     if (!th->first_args) {
00675         rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
00676                  rb_class2name(klass));
00677     }
00678     return thread;
00679 }
00680 
00681 /*
00682  *  call-seq:
00683  *     Thread.start([args]*) {|args| block }   -> thread
00684  *     Thread.fork([args]*) {|args| block }    -> thread
00685  *
00686  *  Basically the same as ::new. However, if class Thread is subclassed, then
00687  *  calling +start+ in that subclass will not invoke the subclass's
00688  *  +initialize+ method.
00689  */
00690 
/* Thread.start / Thread.fork backend: run the block directly, bypassing any
 * subclass #initialize (unlike Thread.new). */
static VALUE
thread_start(VALUE klass, VALUE args)
{
    return thread_create_core(rb_thread_alloc(klass), args, 0);
}
00696 
00697 /* :nodoc: */
/* Thread#initialize: start the thread running the given block.  A block is
 * required; re-initializing an already started thread raises ThreadError
 * naming where the first block was defined, when that can be determined. */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th;
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    GetThreadPtr(thread, th);
    if (th->first_args) {
        /* already started: include the original block's file:line if known */
        VALUE proc = th->first_proc, line, loc;
        const char *file;
        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
                     file);
        }
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    }
    return thread_create_core(thread, args, 0);
}
00722 
/* C API: spawn a Ruby thread that runs fn(arg).  Returns the Thread. */
VALUE
rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
{
    return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
}
00728 
00729 
00730 /* +infty, for this purpose */
00731 #define DELAY_INFTY 1E30
00732 
/* Arguments threaded through thread_join_sleep/remove_from_join_list: the
 * thread being joined (target), the joining thread (waiting), the absolute
 * deadline in timeofday() seconds (limit), and whether to wait forever. */
struct join_arg {
    rb_thread_t *target, *waiting;
    double limit;
    int forever;
};
00738 
00739 static VALUE
00740 remove_from_join_list(VALUE arg)
00741 {
00742     struct join_arg *p = (struct join_arg *)arg;
00743     rb_thread_t *target_th = p->target, *th = p->waiting;
00744 
00745     if (target_th->status != THREAD_KILLED) {
00746         rb_thread_list_t **p = &target_th->join_list;
00747 
00748         while (*p) {
00749             if ((*p)->th == th) {
00750                 *p = (*p)->next;
00751                 break;
00752             }
00753             p = &(*p)->next;
00754         }
00755     }
00756 
00757     return Qnil;
00758 }
00759 
/*
 * rb_ensure body for thread_join: sleep the joining thread until the target
 * dies or the deadline passes.  Returns Qtrue when the target is dead,
 * Qfalse on timeout.
 */
static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    double now, limit = p->limit;

    while (target_th->status != THREAD_KILLED) {
        if (p->forever) {
            /* no timeout: deadlock-detectable sleep */
            sleep_forever(th, 1, 0);
        }
        else {
            now = timeofday();
            if (now > limit) {
                thread_debug("thread_join: timeout (thid: %p)\n",
                             (void *)target_th->thread_id);
                return Qfalse;
            }
            sleep_wait_for_interrupt(th, limit - now, 0);
        }
        /* woken by an interrupt; loop to re-check the target's status */
        thread_debug("thread_join: interrupted (thid: %p)\n",
                     (void *)target_th->thread_id);
    }
    return Qtrue;
}
00785 
/*
 * Wait up to +delay+ seconds (DELAY_INFTY for no limit) for +target_th+ to
 * finish.  Returns the target's self on success, Qnil on timeout.  Raises
 * ThreadError when joining self or the main thread.  If the target died
 * with an exception, it is re-raised here in the joining thread.
 */
static VALUE
thread_join(rb_thread_t *target_th, double delay)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }
    if (GET_VM()->main_thread == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    arg.target = target_th;
    arg.waiting = th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        /* Register on the target's join list; remove_from_join_list undoes
         * this even when the sleep is aborted by an exception (the node is
         * on this stack frame and must not outlive it). */
        rb_thread_list_t list;
        list.next = target_th->join_list;
        list.th = th;
        target_th->join_list = &list;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;

        if (FIXNUM_P(err)) {
            /* internal kill/terminate marker, not a user exception */
        }
        else if (RB_TYPE_P(target_th->errinfo, T_NODE)) {
            /* throw object: convert to the corresponding jump-tag exception */
            rb_exc_raise(rb_vm_make_jump_tag_but_local_jump(
                GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}
00837 
00838 /*
00839  *  call-seq:
00840  *     thr.join          -> thr
00841  *     thr.join(limit)   -> thr
00842  *
00843  *  The calling thread will suspend execution and run <i>thr</i>. Does not
00844  *  return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
00845  *  the time limit expires, <code>nil</code> will be returned, otherwise
00846  *  <i>thr</i> is returned.
00847  *
00848  *  Any threads not joined will be killed when the main program exits.  If
00849  *  <i>thr</i> had previously raised an exception and the
00850  *  <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
00851  *  (so the exception has not yet been processed) it will be processed at this
00852  *  time.
00853  *
00854  *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
00855  *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
00856  *     x.join # Let x thread finish, a will be killed on exit.
00857  *
00858  *  <em>produces:</em>
00859  *
00860  *     axyz
00861  *
00862  *  The following example illustrates the <i>limit</i> parameter.
00863  *
00864  *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
00865  *     puts "Waiting" until y.join(0.15)
00866  *
00867  *  <em>produces:</em>
00868  *
00869  *     tick...
00870  *     Waiting
00871  *     tick...
00872  *     Waitingtick...
00873  *
00874  *
00875  *     tick...
00876  */
00877 
00878 static VALUE
00879 thread_join_m(int argc, VALUE *argv, VALUE self)
00880 {
00881     rb_thread_t *target_th;
00882     double delay = DELAY_INFTY;
00883     VALUE limit;
00884 
00885     GetThreadPtr(self, target_th);
00886 
00887     rb_scan_args(argc, argv, "01", &limit);
00888     if (!NIL_P(limit)) {
00889         delay = rb_num2dbl(limit);
00890     }
00891 
00892     return thread_join(target_th, delay);
00893 }
00894 
00895 /*
00896  *  call-seq:
00897  *     thr.value   -> obj
00898  *
00899  *  Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
00900  *  its value.
00901  *
00902  *     a = Thread.new { 2 + 2 }
00903  *     a.value   #=> 4
00904  */
00905 
/* Thread#value: join the receiver without a time limit (re-raising any
 * exception it died with), then return its result value. */
static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    thread_join(th, DELAY_INFTY);
    return th->value;
}
00914 
00915 /*
00916  * Thread Scheduling
00917  */
00918 
00919 static struct timeval
00920 double2timeval(double d)
00921 {
00922     struct timeval time;
00923 
00924     if (isinf(d)) {
00925         time.tv_sec = TIMET_MAX;
00926         time.tv_usec = 0;
00927         return time;
00928     }
00929 
00930     time.tv_sec = (int)d;
00931     time.tv_usec = (int)((d - (int)d) * 1e6);
00932     if (time.tv_usec < 0) {
00933         time.tv_usec += (int)1e6;
00934         time.tv_sec -= 1;
00935     }
00936     return time;
00937 }
00938 
/*
 * Sleep the current thread until an interrupt wakes it.  With +deadlockable+
 * set, the thread sleeps in THREAD_STOPPED_FOREVER state and participates in
 * VM deadlock detection via vm->sleeper.  Without +spurious_check+, a single
 * wakeup ends the sleep even if the thread's status did not change.
 */
static void
sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;

    th->status = status;
    RUBY_VM_CHECK_INTS_BLOCKING(th);
    while (th->status == status) {
        if (deadlockable) {
            /* count ourselves as a sleeper so deadlock can be detected */
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
        }
        native_sleep(th, 0);
        if (deadlockable) {
            th->vm->sleeper--;
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th);
        if (!spurious_check)
            break;
    }
    th->status = prev_status;
}
00962 
/*
 * Read the current time into *tp, preferring the monotonic clock when
 * available so elapsed-time computations are immune to wall-clock jumps;
 * falls back to gettimeofday() otherwise.
 */
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
        return;
    }
#endif
    gettimeofday(tp, NULL);
}
00978 
/*
 * Sleep the current thread for at most the relative time `tv`, unless
 * its status is changed by another thread first.  An absolute deadline
 * is computed up front (monotonic clock when available) so that
 * re-sleeping after spurious wakeups never extends the total wait.
 *
 * spurious_check - if nonzero, re-sleep for the remaining time after a
 *                  spurious wakeup
 */
static void
sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
{
    struct timeval to, tvn;
    enum rb_thread_status prev_status = th->status;

    /* absolute deadline `to` = now + tv, saturating at TIMET_MAX */
    getclockofday(&to);
    if (TIMET_MAX - tv.tv_sec < to.tv_sec)
        to.tv_sec = TIMET_MAX;
    else
        to.tv_sec += tv.tv_sec;
    if ((to.tv_usec += tv.tv_usec) >= 1000000) {
        if (to.tv_sec == TIMET_MAX)
            to.tv_usec = 999999;
        else {
            to.tv_sec++;
            to.tv_usec -= 1000000;
        }
    }

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &tv);
        RUBY_VM_CHECK_INTS_BLOCKING(th);
        getclockofday(&tvn);
        /* stop once the deadline has been reached */
        if (to.tv_sec < tvn.tv_sec) break;
        if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
        thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
                     (long)to.tv_sec, (long)to.tv_usec,
                     (long)tvn.tv_sec, (long)tvn.tv_usec);
        /* woke early: shrink tv to the time left until the deadline */
        tv.tv_sec = to.tv_sec - tvn.tv_sec;
        if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
            --tv.tv_sec;
            tv.tv_usec += 1000000;
        }
        if (!spurious_check)
            break;
    }
    th->status = prev_status;
}
01020 
/* Sleep the current thread until it is woken by another thread; does not
 * take part in deadlock detection. */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0, 1);
}
01027 
/* Like rb_thread_sleep_forever(), but the sleep is visible to the VM's
 * deadlock detector (THREAD_STOPPED_FOREVER). */
static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1, 1);
}
01034 
/*
 * Current time as a double in seconds, preferring the monotonic clock
 * when available; otherwise wall-clock via gettimeofday().
 */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        return (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9;
    }
#endif
    {
        struct timeval now;

        gettimeofday(&now, NULL);
        return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
    }
}
01051 
/* Sleep `th` for `sleepsec` seconds; wakes early only on status change
 * (re-sleeps on spurious wakeup when spurious_check is nonzero). */
static void
sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
{
    sleep_timeval(th, double2timeval(sleepsec), spurious_check);
}
01057 
01058 static void
01059 sleep_for_polling(rb_thread_t *th)
01060 {
01061     struct timeval time;
01062     time.tv_sec = 0;
01063     time.tv_usec = 100 * 1000;  /* 0.1 sec */
01064     sleep_timeval(th, time, 1);
01065 }
01066 
/* Public API: sleep the current thread for the given relative time. */
void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();
    sleep_timeval(th, time, 1);
}
01073 
01074 void
01075 rb_thread_polling(void)
01076 {
01077     if (!rb_thread_alone()) {
01078         rb_thread_t *th = GET_THREAD();
01079         RUBY_VM_CHECK_INTS_BLOCKING(th);
01080         sleep_for_polling(th);
01081     }
01082 }
01083 
01084 /*
01085  * CAUTION: This function causes thread switching.
01086  *          rb_thread_check_ints() check ruby's interrupts.
01087  *          some interrupt needs thread switching/invoke handlers,
01088  *          and so on.
01089  */
01090 
void
rb_thread_check_ints(void)
{
    /* blocking-style check: may run interrupt handlers and switch threads */
    RUBY_VM_CHECK_INTS_BLOCKING(GET_THREAD());
}
01096 
01097 /*
01098  * Hidden API for tcl/tk wrapper.
01099  * There is no guarantee to perpetuate it.
01100  */
int
rb_thread_check_trap_pending(void)
{
    /* nonzero iff buffered (not yet handled) signals exist */
    return rb_signal_buff_size() != 0;
}
01106 
01107 /* This function can be called in blocking region. */
01108 int
01109 rb_thread_interrupted(VALUE thval)
01110 {
01111     rb_thread_t *th;
01112     GetThreadPtr(thval, th);
01113     return (int)RUBY_VM_INTERRUPTED(th);
01114 }
01115 
/* Sleep the current thread for `sec` seconds. */
void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
01121 
/*
 * Yield the GVL to other threads, but only when the current thread has
 * accumulated at least `limits_us` microseconds of running time.  No-op
 * when it is the only Ruby thread.
 */
static void
rb_thread_schedule_limits(unsigned long limits_us)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        if (th->running_time_us >= limits_us) {
            thread_debug("rb_thread_schedule/switch start\n");
            /* record machine registers/stack for conservative GC before
             * letting another thread run */
            RB_GC_SAVE_MACHINE_CONTEXT(th);
            gvl_yield(th->vm, th);
            rb_thread_set_current(th);
            thread_debug("rb_thread_schedule/switch done\n");
        }
    }
}
01138 
/*
 * Offer to switch to another ready thread, then execute any interrupts
 * pending on the current thread.
 */
void
rb_thread_schedule(void)
{
    rb_thread_t *cur_th = GET_THREAD();
    rb_thread_schedule_limits(0);

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(cur_th))) {
        rb_threadptr_execute_interrupts(cur_th, 0);
    }
}
01149 
01150 /* blocking region */
01151 
/*
 * Enter a blocking region: install `ubf(arg)` as the unblock function,
 * save the previous status into `region`, mark the thread stopped,
 * record the machine context for conservative GC, and release the GVL.
 *
 * Returns TRUE on success.  Returns FALSE — with nothing changed and
 * the GVL still held — when set_unblock_function() fails (which can
 * only happen with fail_if_interrupted set).
 */
static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
    region->prev_status = th->status;
    if (set_unblock_function(th, ubf, arg, &region->oldubf, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        thread_debug("enter blocking region (%p)\n", (void *)th);
        RB_GC_SAVE_MACHINE_CONTEXT(th);
        gvl_release(th->vm);
        return TRUE;
    }
    else {
        return FALSE;
    }
}
01169 
/*
 * Leave a blocking region: re-acquire the GVL, detach the region buffer,
 * restore the previous unblock function, and restore the thread status
 * saved by blocking_region_begin() (unless something else already moved
 * it off THREAD_STOPPED).
 */
static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    gvl_acquire(th->vm, th);
    rb_thread_set_current(th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    remove_signal_thread_list(th);
    th->blocking_region_buffer = 0;
    reset_unblock_function(th, &region->oldubf);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}
01183 
/*
 * Public API: heap-allocate a region buffer and enter a blocking region
 * (GVL released, ubf_select as unblock function).  Must be paired with
 * rb_thread_blocking_region_end().
 */
struct rb_blocking_region_buffer *
rb_thread_blocking_region_begin(void)
{
    rb_thread_t *th = GET_THREAD();
    struct rb_blocking_region_buffer *region = ALLOC(struct rb_blocking_region_buffer);
    blocking_region_begin(th, region, ubf_select, th, FALSE);
    return region;
}
01192 
/*
 * Counterpart of rb_thread_blocking_region_begin(): leave the region,
 * free the buffer, and process pending interrupts.  errno is saved and
 * restored so the caller's error state survives the interrupt check.
 */
void
rb_thread_blocking_region_end(struct rb_blocking_region_buffer *region)
{
    int saved_errno = errno;
    rb_thread_t *th = ruby_thread_from_native();
    blocking_region_end(th, region);
    xfree(region);
    RUBY_VM_CHECK_INTS_BLOCKING(th);
    errno = saved_errno;
}
01203 
/*
 * Shared implementation of rb_thread_call_without_gvl{,2}(): run
 * func(data1) inside a blocking region (GVL released) with ubf(data2)
 * as the unblock function.
 *
 * The RUBY_UBF_IO/RUBY_UBF_PROCESS markers are replaced by the built-in
 * ubf_select acting on the current thread.  errno produced by func() is
 * restored before returning.  When fail_if_interrupted is set, the
 * final interrupt check is skipped (interrupt handling is left to the
 * caller) and val stays 0 if the region was never entered.
 */
static void *
call_without_gvl(void *(*func)(void *), void *data1,
                 rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
{
    void *val = 0;

    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    th->waiting_fd = -1; /* this region does not wait on any fd */
    if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
        ubf = ubf_select;
        data2 = th;
    }

    BLOCKING_REGION({
        val = func(data1);
        saved_errno = errno;
    }, ubf, data2, fail_if_interrupted);

    if (!fail_if_interrupted) {
        RUBY_VM_CHECK_INTS_BLOCKING(th);
    }

    errno = saved_errno;

    return val;
}
01232 
01233 /*
01234  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
01235  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt process.
01237  *
01238  * rb_thread_call_without_gvl() does:
01239  *   (1) Check interrupts.
01240  *   (2) release GVL.
01241  *       Other Ruby threads may run in parallel.
01242  *   (3) call func with data1
01243  *   (4) acquire GVL.
01244  *       Other Ruby threads can not run in parallel any more.
01245  *   (5) Check interrupts.
01246  *
01247  * rb_thread_call_without_gvl2() does:
01248  *   (1) Check interrupt and return if interrupted.
01249  *   (2) release GVL.
01250  *   (3) call func with data1 and a pointer to the flags.
01251  *   (4) acquire GVL.
01252  *
01253  * If another thread interrupts this thread (Thread#kill, signal delivery,
01254  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
01255  * "un-blocking function").  `ubf()' should interrupt `func()' execution by
01256  * toggling a cancellation flag, canceling the invocation of a call inside
01257  * `func()' or similar.  Note that `ubf()' may not be called with the GVL.
01258  *
01259  * There are built-in ubfs and you can specify these ubfs:
01260  *
01261  * * RUBY_UBF_IO: ubf for IO operation
01262  * * RUBY_UBF_PROCESS: ubf for process operation
01263  *
01264  * However, we can not guarantee our built-in ubfs interrupt your `func()'
01265  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
01266  * provide proper ubf(), your program will not stop for Control+C or other
01267  * shutdown events.
01268  *
01269  * "Check interrupts" on above list means that check asynchronous
01270  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
01271  * request, and so on) and call corresponding procedures
01272  * (such as `trap' for signals, raise an exception for Thread#raise).
01273  * If `func()' finished and receive interrupts, you may skip interrupt
01274  * checking.  For example, assume the following func() it read data from file.
01275  *
01276  *   read_func(...) {
01277  *                   // (a) before read
01278  *     read(buffer); // (b) reading
01279  *                   // (c) after read
01280  *   }
01281  *
01282  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
01283  * `read_func()' and interrupts are checked. However, if an interrupt occurs
 * at (c), after the *read* operation is completed, checking interrupts is
01285  * because it causes irrevocable side-effect, the read data will vanish.  To
01286  * avoid such problem, the `read_func()' should be used with
01287  * `rb_thread_call_without_gvl2()'.
01288  *
01289  * If `rb_thread_call_without_gvl2()' detects interrupt, return its execution
01290  * immediately. This function does not show when the execution was interrupted.
01291  * For example, there are 4 possible timing (a), (b), (c) and before calling
01292  * read_func(). You need to record progress of a read_func() and check
01293  * the progress after `rb_thread_call_without_gvl2()'. You may need to call
01294  * `rb_thread_check_ints()' correctly or your program can not process proper
01295  * process such as `trap' and so on.
01296  *
01297  * NOTE: You can not execute most of Ruby C API and touch Ruby
01298  *       objects in `func()' and `ubf()', including raising an
01299  *       exception, because current thread doesn't acquire GVL
01300  *       (it causes synchronization problems).  If you need to
01301  *       call ruby functions either use rb_thread_call_with_gvl()
01302  *       or read source code of C APIs and confirm safety by
01303  *       yourself.
01304  *
01305  * NOTE: In short, this API is difficult to use safely.  I recommend you
01306  *       use other ways if you have.  We lack experiences to use this API.
01307  *       Please report your problem related on it.
01308  *
01309  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
01310  *       for a short running `func()'. Be sure to benchmark and use this
01311  *       mechanism when `func()' consumes enough time.
01312  *
01313  * Safe C API:
01314  * * rb_thread_interrupted() - check interrupt flag
01315  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
01316  *   they will work without GVL, and may acquire GVL when GC is needed.
01317  */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    /* variant that returns without running interrupt handlers; see the
     * documentation comment above */
    return call_without_gvl(func, data1, ubf, data2, TRUE);
}
01324 
void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    /* checks interrupts after func returns; see the documentation above */
    return call_without_gvl(func, data1, ubf, data2, FALSE);
}
01331 
/*
 * Run func(data1) without the GVL while recording `fd` in
 * th->waiting_fd so the VM can tell which descriptor this thread is
 * blocked on.  ubf_select is the unblock function.  waiting_fd is
 * cleared even on a non-local exit (TH_PUSH_TAG/TH_POP_TAG), and errno
 * from func survives the final interrupt check.
 */
VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    VALUE val = Qundef; /* shouldn't be used */
    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;
    int state;

    th->waiting_fd = fd;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        BLOCKING_REGION({
            val = func(data1);
            saved_errno = errno;
        }, ubf_select, th, FALSE);
    }
    TH_POP_TAG();

    /* clear waiting_fd anytime */
    th->waiting_fd = -1;

    if (state) {
        /* re-raise the non-local exit captured above */
        JUMP_TAG(state);
    }
    /* TODO: check func() */
    RUBY_VM_CHECK_INTS_BLOCKING(th);

    errno = saved_errno;

    return val;
}
01364 
01365 VALUE
01366 rb_thread_blocking_region(
01367     rb_blocking_function_t *func, void *data1,
01368     rb_unblock_function_t *ubf, void *data2)
01369 {
01370     void *(*f)(void*) = (void *(*)(void*))func;
01371     return (VALUE)rb_thread_call_without_gvl(f, data1, ubf, data2);
01372 }
01373 
01374 /*
01375  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
01376  *
01377  * After releasing GVL using rb_thread_blocking_region() or
01378  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
01379  * methods. If you need to access Ruby you must use this function
01380  * rb_thread_call_with_gvl().
01381  *
01382  * This function rb_thread_call_with_gvl() does:
01383  * (1) acquire GVL.
01384  * (2) call passed function `func'.
01385  * (3) release GVL.
01386  * (4) return a value which is returned at (2).
01387  *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked.
01390  *
01391  * NOTE: If an exception is raised in `func', this function DOES NOT
01392  *       protect (catch) the exception.  If you have any resources
01393  *       which should free before throwing exception, you need use
01394  *       rb_protect() in `func' and return a value which represents
01395  *       exception is raised.
01396  *
01397  * NOTE: This function should not be called by a thread which was not
01398  *       created as Ruby thread (created by Thread.new or so).  In other
01399  *       words, this function *DOES NOT* associate or convert a NON-Ruby
01400  *       thread to a Ruby thread.
01401  */
/* See the contract in the documentation comment above. */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* An error occurred, but we cannot use rb_bug()
         * because this thread is not a Ruby thread.
         * What should we do?
         */

        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    /* leaving the blocking region re-acquires the GVL */
    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    return r;
}
01434 
01435 /*
01436  * ruby_thread_has_gvl_p - check if current native thread has GVL.
01437  *
01438  ***
01439  *** This API is EXPERIMENTAL!
01440  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
01441  ***
01442  */
01443 
01444 int
01445 ruby_thread_has_gvl_p(void)
01446 {
01447     rb_thread_t *th = ruby_thread_from_native();
01448 
01449     if (th && th->blocking_region_buffer == 0) {
01450         return 1;
01451     }
01452     else {
01453         return 0;
01454     }
01455 }
01456 
01457 /*
01458  * call-seq:
01459  *    Thread.pass   -> nil
01460  *
01461  * Give the thread scheduler a hint to pass execution to another thread.
01462  * A running thread may or may not switch, it depends on OS and processor.
01463  */
01464 
static VALUE
thread_s_pass(VALUE klass)
{
    /* just offer a reschedule point; always returns nil */
    rb_thread_schedule();
    return Qnil;
}
01471 
01472 /*****************************************************/
01473 
01474 /*
01475  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
01476  *
 * Async events such as an exception thrown by Thread#raise,
01478  * Thread#kill and thread termination (after main thread termination)
01479  * will be queued to th->pending_interrupt_queue.
01480  * - clear: clear the queue.
01481  * - enque: enque err object into queue.
01482  * - deque: deque err object from queue.
01483  * - active_p: return 1 if the queue should be checked.
01484  *
01485  * All rb_threadptr_pending_interrupt_* functions are called by
01486  * a GVL acquired thread, of course.
01487  * Note that all "rb_" prefix APIs need GVL to call.
01488  */
01489 
/* Discard every queued asynchronous error for `th`. */
void
rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
{
    rb_ary_clear(th->pending_interrupt_queue);
}
01495 
/* Queue the async error object `v` for `th` and mark the queue unchecked
 * so the next interrupt check rescans it. */
void
rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
{
    rb_ary_push(th->pending_interrupt_queue, v);
    th->pending_interrupt_queue_checked = 0;
}
01502 
/* Delivery timing of a queued interrupt, as decided by the current
 * Thread.handle_interrupt mask stack. */
enum handle_interrupt_timing {
    INTERRUPT_NONE,
    INTERRUPT_IMMEDIATE,
    INTERRUPT_ON_BLOCKING,
    INTERRUPT_NEVER
};
01509 
/*
 * Look up how an interrupt of class `err` should be delivered under
 * th->pending_interrupt_mask_stack.  The innermost (most recently
 * pushed) mask is consulted first; within one mask, the first entry
 * matching `err` or one of its ancestors decides.  Returns
 * INTERRUPT_NONE when no mask mentions `err`.
 */
static enum handle_interrupt_timing
rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
{
    VALUE mask;
    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
    VALUE *mask_stack = RARRAY_PTR(th->pending_interrupt_mask_stack);
    VALUE ancestors = rb_mod_ancestors(err); /* TODO: GC guard */
    long ancestors_len = RARRAY_LEN(ancestors);
    VALUE *ancestors_ptr = RARRAY_PTR(ancestors);
    int i, j;

    /* walk masks from the top of the stack (innermost block) downwards */
    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];

        for (j=0; j<ancestors_len; j++) {
            VALUE klass = ancestors_ptr[j];
            VALUE sym;

            /* TODO: remove rb_intern() */
            if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
                if (sym == sym_immediate) {
                    return INTERRUPT_IMMEDIATE;
                }
                else if (sym == sym_on_blocking) {
                    return INTERRUPT_ON_BLOCKING;
                }
                else if (sym == sym_never) {
                    return INTERRUPT_NEVER;
                }
                else {
                    rb_raise(rb_eThreadError, "unknown mask signature");
                }
            }
        }
        /* try to next mask */
    }
    return INTERRUPT_NONE;
}
01548 
/* Nonzero when no asynchronous errors are queued for `th`. */
static int
rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
{
    return RARRAY_LEN(th->pending_interrupt_queue) == 0;
}
01554 
01555 static int
01556 rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
01557 {
01558     int i;
01559     for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
01560         VALUE e = RARRAY_PTR(th->pending_interrupt_queue)[i];
01561         if (rb_class_inherited_p(e, err)) {
01562             return TRUE;
01563         }
01564     }
01565     return FALSE;
01566 }
01567 
/*
 * Remove and return the first queued async error deliverable at
 * `timing` under the current mask stack.  Errors masked :never, or
 * :on_blocking when `timing` is not INTERRUPT_ON_BLOCKING, stay queued.
 * Returns Qundef (and marks the queue checked) when nothing is
 * deliverable right now.
 */
static VALUE
rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
{
#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
    int i;

    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
        VALUE err = RARRAY_PTR(th->pending_interrupt_queue)[i];

        enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));

        switch (mask_timing) {
          case INTERRUPT_ON_BLOCKING:
            if (timing != INTERRUPT_ON_BLOCKING) {
                break;
            }
            /* fall through */
          case INTERRUPT_NONE: /* default: IMMEDIATE */
          case INTERRUPT_IMMEDIATE:
            rb_ary_delete_at(th->pending_interrupt_queue, i);
            return err;
          case INTERRUPT_NEVER:
            break;
        }
    }

    th->pending_interrupt_queue_checked = 1;
    return Qundef;
#else
    VALUE err = rb_ary_shift(th->pending_interrupt_queue);
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 1;
    }
    return err;
#endif
}
01604 
01605 int
01606 rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
01607 {
01608     /*
01609      * For optimization, we don't check async errinfo queue
01610      * if it nor a thread interrupt mask were not changed
01611      * since last check.
01612      */
01613     if (th->pending_interrupt_queue_checked) {
01614         return 0;
01615     }
01616 
01617     if (rb_threadptr_pending_interrupt_empty_p(th)) {
01618         return 0;
01619     }
01620 
01621     return 1;
01622 }
01623 
01624 static int
01625 handle_interrupt_arg_check_i(VALUE key, VALUE val)
01626 {
01627     if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
01628         rb_raise(rb_eArgError, "unknown mask signature");
01629     }
01630 
01631     return ST_CONTINUE;
01632 }
01633 
01634 /*
01635  * call-seq:
01636  *   Thread.handle_interrupt(hash) { ... } -> result of the block
01637  *
01638  * Changes asynchronous interrupt timing.
01639  *
01640  * _interrupt_ means asynchronous event and corresponding procedure
01641  * by Thread#raise, Thread#kill, signal trap (not supported yet)
01642  * and main thread termination (if main thread terminates, then all
01643  * other thread will be killed).
01644  *
01645  * The given +hash+ has pairs like <code>ExceptionClass =>
01646  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
01647  * the given block. The TimingSymbol can be one of the following symbols:
01648  *
01649  * [+:immediate+]   Invoke interrupts immediately.
01650  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
01651  * [+:never+]       Never invoke all interrupts.
01652  *
01653  * _BlockingOperation_ means that the operation will block the calling thread,
01654  * such as read and write.  On CRuby implementation, _BlockingOperation_ is any
01655  * operation executed without GVL.
01656  *
01657  * Masked asynchronous interrupts are delayed until they are enabled.
01658  * This method is similar to sigprocmask(3).
01659  *
01660  * === NOTE
01661  *
01662  * Asynchronous interrupts are difficult to use.
01663  *
01664  * If you need to communicate between threads, please consider to use another way such as Queue.
01665  *
01666  * Or use them with deep understanding about this method.
01667  *
01668  * === Usage
01669  *
01670  * In this example, we can guard from Thread#raise exceptions.
01671  *
01672  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
01673  * ignored in the first block of the main thread. In the second
01674  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
01675  *
01676  *   th = Thread.new do
 *     Thread.handle_interrupt(RuntimeError => :never) {
01678  *       begin
01679  *         # You can write resource allocation code safely.
01680  *         Thread.handle_interrupt(RuntimeError => :immediate) {
01681  *           # ...
01682  *         }
01683  *       ensure
01684  *         # You can write resource deallocation code safely.
01685  *       end
01686  *     }
01687  *   end
01688  *   Thread.pass
01689  *   # ...
01690  *   th.raise "stop"
01691  *
01692  * While we are ignoring the RuntimeError exception, it's safe to write our
01693  * resource allocation code. Then, the ensure block is where we can safely
01694  * deallocate your resources.
01695  *
01696  * ==== Guarding from TimeoutError
01697  *
01698  * In the next example, we will guard from the TimeoutError exception. This
01699  * will help prevent from leaking resources when TimeoutError exceptions occur
01700  * during normal ensure clause. For this example we use the help of the
01701  * standard library Timeout, from lib/timeout.rb
01702  *
01703  *   require 'timeout'
01704  *   Thread.handle_interrupt(TimeoutError => :never) {
01705  *     timeout(10){
01706  *       # TimeoutError doesn't occur here
01707  *       Thread.handle_interrupt(TimeoutError => :on_blocking) {
01708  *         # possible to be killed by TimeoutError
01709  *         # while blocking operation
01710  *       }
01711  *       # TimeoutError doesn't occur here
01712  *     }
01713  *   }
01714  *
01715  * In the first part of the +timeout+ block, we can rely on TimeoutError being
01716  * ignored. Then in the <code>TimeoutError => :on_blocking</code> block, any
01717  * operation that will block the calling thread is susceptible to a
01718  * TimeoutError exception being raised.
01719  *
01720  * ==== Stack control settings
01721  *
01722  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
01723  * to control more than one ExceptionClass and TimingSymbol at a time.
01724  *
01725  *   Thread.handle_interrupt(FooError => :never) {
01726  *     Thread.handle_interrupt(BarError => :never) {
01727  *        # FooError and BarError are prohibited.
01728  *     }
01729  *   }
01730  *
01731  * ==== Inheritance with ExceptionClass
01732  *
01733  * All exceptions inherited from the ExceptionClass parameter will be considered.
01734  *
01735  *   Thread.handle_interrupt(Exception => :never) {
01736  *     # all exceptions inherited from Exception are prohibited.
01737  *   }
01738  *
01739  */
/* Implementation of Thread.handle_interrupt (RDoc above): push the
 * validated mask, run the block, pop the mask, and trigger interrupt
 * checks at each transition where queued errors may become deliverable. */
static VALUE
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
{
    VALUE mask;
    rb_thread_t *th = GET_THREAD();
    VALUE r = Qnil;
    int state;

    if (!rb_block_given_p()) {
        rb_raise(rb_eArgError, "block is needed.");
    }

    mask = rb_convert_type(mask_arg, T_HASH, "Hash", "to_hash");
    rb_hash_foreach(mask, handle_interrupt_arg_check_i, 0);
    rb_ary_push(th->pending_interrupt_mask_stack, mask);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        /* queued errors may be deliverable under the new mask */
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th);
    }

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        r = rb_yield(Qnil);
    }
    TH_POP_TAG();

    /* pop the mask even if the block exited non-locally */
    rb_ary_pop(th->pending_interrupt_mask_stack);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        /* deferred errors may be deliverable now the mask is gone */
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th);
    }

    RUBY_VM_CHECK_INTS(th);

    if (state) {
        JUMP_TAG(state);
    }

    return r;
}
01780 
01781 /*
01782  * call-seq:
01783  *   target_thread.pending_interrupt?(error = nil) -> true/false
01784  *
 * Returns whether or not the asynchronous queue is empty for the target thread.
01786  *
01787  * If +error+ is given, then check only for +error+ type deferred events.
01788  *
01789  * See ::pending_interrupt? for more information.
01790  */
/* Implementation of Thread#pending_interrupt? (RDoc above). */
static VALUE
rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
{
    rb_thread_t *target_th;

    GetThreadPtr(target_thread, target_th);

    if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
        return Qfalse;
    }
    else {
        if (argc == 1) {
            VALUE err;
            rb_scan_args(argc, argv, "01", &err);
            if (!rb_obj_is_kind_of(err, rb_cModule)) {
                rb_raise(rb_eTypeError, "class or module required for rescue clause");
            }
            /* only report true when an error of the given class (or a
             * subclass) is actually queued */
            if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
                return Qtrue;
            }
            else {
                return Qfalse;
            }
        }
        return Qtrue;
    }
}
01818 
01819 /*
01820  * call-seq:
01821  *   Thread.pending_interrupt?(error = nil) -> true/false
01822  *
01823  * Returns whether or not the asynchronous queue is empty.
01824  *
01825  * Since Thread::handle_interrupt can be used to defer asynchronous events.
01826  * This method can be used to determine if there are any deferred events.
01827  *
01828  * If you find this method returns true, then you may finish +:never+ blocks.
01829  *
01830  * For example, the following method processes deferred asynchronous events
01831  * immediately.
01832  *
01833  *   def Thread.kick_interrupt_immediately
01834  *     Thread.handle_interrupt(Object => :immediate) {
01835  *       Thread.pass
01836  *     }
01837  *   end
01838  *
01839  * If +error+ is given, then check only for +error+ type deferred events.
01840  *
01841  * === Usage
01842  *
01843  *   th = Thread.new{
01844  *     Thread.handle_interrupt(RuntimeError => :on_blocking){
01845  *       while true
01846  *         ...
01847  *         # reach safe point to invoke interrupt
01848  *         if Thread.pending_interrupt?
01849  *           Thread.handle_interrupt(Object => :immediate){}
01850  *         end
01851  *         ...
01852  *       end
01853  *     }
01854  *   }
01855  *   ...
01856  *   th.raise # stop thread
01857  *
01858  * This example can also be written as the following, which you should use to
01859  * avoid asynchronous interrupts.
01860  *
01861  *   flag = true
01862  *   th = Thread.new{
01863  *     Thread.handle_interrupt(RuntimeError => :on_blocking){
01864  *       while true
01865  *         ...
01866  *         # reach safe point to invoke interrupt
01867  *         break if flag == false
01868  *         ...
01869  *       end
01870  *     }
01871  *   }
01872  *   ...
01873  *   flag = false # stop thread
01874  */
01875 
static VALUE
rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
{
    /* Thread.pending_interrupt? -- delegate to the instance-level check,
     * applied to the currently running thread. */
    return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
}
01881 
static void
rb_threadptr_to_kill(rb_thread_t *th)
{
    /* Begin terminating th: discard any queued async exceptions, mark the
     * thread as dying, and unwind through the fatal tag.  Does not return
     * to the caller (TH_JUMP_TAG performs a longjmp-style unwind). */
    rb_threadptr_pending_interrupt_clear(th);
    th->status = THREAD_RUNNABLE;
    th->to_kill = 1;
    th->errinfo = INT2FIX(TAG_FATAL);
    TH_JUMP_TAG(th, TAG_FATAL);
}
01891 
/* Process all deliverable interrupts for th: trap handlers, async
 * exceptions queued by other threads, deferred finalizers, and timer
 * (time-slice) interrupts.  blocking_timing is nonzero when the caller
 * is at a blocking point, which also makes :on_blocking-masked pending
 * interrupts deliverable.  May raise or unwind and never return. */
void
rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
{
    /* Already in the middle of raising on this thread; do not recurse. */
    if (th->raised_flag) return;

    while (1) {
        rb_atomic_t interrupt;
        rb_atomic_t old;
        int sig;
        int timer_interrupt;
        int pending_interrupt;
        int finalizer_interrupt;
        int trap_interrupt;

        /* Atomically snapshot the interrupt flags and clear the unmasked
         * bits; the CAS loop handles other threads setting bits
         * concurrently. */
        do {
            interrupt = th->interrupt_flag;
            old = ATOMIC_CAS(th->interrupt_flag, interrupt, interrupt & th->interrupt_mask);
        } while (old != interrupt);

        /* Keep only the bits we are allowed to act on right now. */
        interrupt &= (rb_atomic_t)~th->interrupt_mask;
        if (!interrupt)
            return;

        timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
        pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
        finalizer_interrupt = interrupt & FINALIZER_INTERRUPT_MASK;
        trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;

        /* signal handling -- only the main thread runs trap handlers */
        if (trap_interrupt && (th == th->vm->main_thread)) {
            enum rb_thread_status prev_status = th->status;
            th->status = THREAD_RUNNABLE;
            while ((sig = rb_get_next_signal()) != 0) {
                rb_signal_exec(th, sig);
            }
            th->status = prev_status;
        }

        /* exception from another thread */
        if (pending_interrupt && rb_threadptr_pending_interrupt_active_p(th)) {
            VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
            thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);

            if (err == Qundef) {
                /* no error */
            }
            else if (err == eKillSignal        /* Thread#kill received */   ||
                     err == eTerminateSignal   /* Terminate thread */       ||
                     err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */         ) {
                rb_threadptr_to_kill(th);
            }
            else {
                /* set runnable if th was slept. */
                if (th->status == THREAD_STOPPED ||
                    th->status == THREAD_STOPPED_FOREVER)
                    th->status = THREAD_RUNNABLE;
                rb_exc_raise(err);
            }
        }

        if (finalizer_interrupt) {
            rb_gc_finalize_deferred();
        }

        if (timer_interrupt) {
            unsigned long limits_us = TIME_QUANTUM_USEC;

            /* Higher priority threads get a longer time slice, lower
             * priority a shorter one (shift by the priority value). */
            if (th->priority > 0)
                limits_us <<= th->priority;
            else
                limits_us >>= -th->priority;

            if (th->status == THREAD_RUNNABLE)
                th->running_time_us += TIME_QUANTUM_USEC;

            EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0, Qundef);

            rb_thread_schedule_limits(limits_us);
        }
    }
}
01973 
void
rb_thread_execute_interrupts(VALUE thval)
{
    /* Public wrapper: run pending interrupts for thval, treating the
     * caller as being at a blocking point (blocking_timing = 1). */
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    rb_threadptr_execute_interrupts(th, 1);
}
01981 
static void
rb_threadptr_ready(rb_thread_t *th)
{
    /* Poke th so it leaves its wait state and re-checks interrupt flags. */
    rb_threadptr_interrupt(th);
}
01987 
01988 static VALUE
01989 rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv)
01990 {
01991     VALUE exc;
01992 
01993     if (rb_threadptr_dead(th)) {
01994         return Qnil;
01995     }
01996 
01997     if (argc == 0) {
01998         exc = rb_exc_new(rb_eRuntimeError, 0, 0);
01999     }
02000     else {
02001         exc = rb_make_exception(argc, argv);
02002     }
02003     rb_threadptr_pending_interrupt_enque(th, exc);
02004     rb_threadptr_interrupt(th);
02005     return Qnil;
02006 }
02007 
void
rb_threadptr_signal_raise(rb_thread_t *th, int sig)
{
    /* Deliver signal sig as a SignalException.  Note: th is only used to
     * reach the VM; the exception is always raised on the main thread. */
    VALUE argv[2];

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
02017 
void
rb_threadptr_signal_exit(rb_thread_t *th)
{
    /* Raise SystemExit("exit") on the main thread of th's VM. */
    VALUE argv[2];

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
02027 
02028 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
02029 #define USE_SIGALTSTACK
02030 #endif
02031 
void
ruby_thread_stack_overflow(rb_thread_t *th)
{
    /* Report a machine stack overflow on th.  With sigaltstack available
     * we run on an alternate stack and can raise directly; otherwise
     * stash the prebuilt error and unwind via the raise tag. */
    th->raised_flag = 0;
#ifdef USE_SIGALTSTACK
    rb_exc_raise(sysstack_error);
#else
    th->errinfo = sysstack_error;
    TH_JUMP_TAG(th, TAG_RAISE);
#endif
}
02043 
02044 int
02045 rb_threadptr_set_raised(rb_thread_t *th)
02046 {
02047     if (th->raised_flag & RAISED_EXCEPTION) {
02048         return 1;
02049     }
02050     th->raised_flag |= RAISED_EXCEPTION;
02051     return 0;
02052 }
02053 
02054 int
02055 rb_threadptr_reset_raised(rb_thread_t *th)
02056 {
02057     if (!(th->raised_flag & RAISED_EXCEPTION)) {
02058         return 0;
02059     }
02060     th->raised_flag &= ~RAISED_EXCEPTION;
02061     return 1;
02062 }
02063 
static int
thread_fd_close_i(st_data_t key, st_data_t val, st_data_t data)
{
    /* st_foreach callback: if the thread (key) is blocked waiting on fd
     * (data), queue a "stream closed" exception and interrupt it. */
    int fd = (int)data;
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);

    if (th->waiting_fd == fd) {
        VALUE err = th->vm->special_exceptions[ruby_error_closed_stream];
        rb_threadptr_pending_interrupt_enque(th, err);
        rb_threadptr_interrupt(th);
    }
    return ST_CONTINUE;
}
02078 
void
rb_thread_fd_close(int fd)
{
    /* Notify every living thread that is blocked on fd that the stream
     * has been closed under it. */
    st_foreach(GET_THREAD()->vm->living_threads, thread_fd_close_i, (st_index_t)fd);
}
02084 
02085 /*
02086  *  call-seq:
02087  *     thr.raise
02088  *     thr.raise(string)
02089  *     thr.raise(exception [, string [, array]])
02090  *
02091  *  Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
02092  *  caller does not have to be <i>thr</i>.
02093  *
02094  *     Thread.abort_on_exception = true
02095  *     a = Thread.new { sleep(200) }
02096  *     a.raise("Gotcha")
02097  *
02098  *  <em>produces:</em>
02099  *
02100  *     prog.rb:3: Gotcha (RuntimeError)
02101  *      from prog.rb:2:in `initialize'
02102  *      from prog.rb:2:in `new'
02103  *      from prog.rb:2
02104  */
02105 
static VALUE
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    /* Thread#raise -- queue an async exception on the target thread. */
    rb_thread_t *target_th;
    rb_thread_t *th = GET_THREAD();
    GetThreadPtr(self, target_th);
    rb_threadptr_raise(target_th, argc, argv);

    /* To perform Thread.current.raise as Kernel.raise: when raising on
     * ourselves, deliver the queued exception immediately. */
    if (th == target_th) {
        RUBY_VM_CHECK_INTS(th);
    }
    return Qnil;
}
02120 
02121 
02122 /*
02123  *  call-seq:
02124  *     thr.exit        -> thr or nil
02125  *     thr.kill        -> thr or nil
02126  *     thr.terminate   -> thr or nil
02127  *
02128  *  Terminates <i>thr</i> and schedules another thread to be run. If this thread
02129  *  is already marked to be killed, <code>exit</code> returns the
02130  *  <code>Thread</code>. If this is the main thread, or the last thread, exits
02131  *  the process.
02132  */
02133 
VALUE
rb_thread_kill(VALUE thread)
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    /* Killing another thread requires $SAFE clearance unless the target
     * itself already runs at safe level 4 or above. */
    if (th != GET_THREAD() && th->safe_level < 4) {
        rb_secure(4);
    }
    /* Already dying or dead: just return the Thread object. */
    if (th->to_kill || th->status == THREAD_KILLED) {
        return thread;
    }
    /* Killing the main thread terminates the whole process. */
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);
    }

    thread_debug("rb_thread_kill: %p (%p)\n", (void *)th, (void *)th->thread_id);

    if (th == GET_THREAD()) {
        /* kill myself immediately */
        rb_threadptr_to_kill(th);
    }
    else {
        /* Ask the target to die asynchronously via its interrupt queue. */
        rb_threadptr_pending_interrupt_enque(th, eKillSignal);
        rb_threadptr_interrupt(th);
    }
    return thread;
}
02163 
02164 
02165 /*
02166  *  call-seq:
02167  *     Thread.kill(thread)   -> thread
02168  *
02169  *  Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
02170  *
02171  *     count = 0
02172  *     a = Thread.new { loop { count += 1 } }
02173  *     sleep(0.1)       #=> 0
02174  *     Thread.kill(a)   #=> #<Thread:0x401b3d30 dead>
02175  *     count            #=> 93947
02176  *     a.alive?         #=> false
02177  */
02178 
static VALUE
rb_thread_s_kill(VALUE obj, VALUE th)
{
    /* Thread.kill(thread) -- class-level alias for Thread#kill. */
    return rb_thread_kill(th);
}
02184 
02185 
02186 /*
02187  *  call-seq:
02188  *     Thread.exit   -> thread
02189  *
02190  *  Terminates the currently running thread and schedules another thread to be
02191  *  run. If this thread is already marked to be killed, <code>exit</code>
02192  *  returns the <code>Thread</code>. If this is the main thread, or the last
02193  *  thread, exit the process.
02194  */
02195 
static VALUE
rb_thread_exit(void)
{
    /* Thread.exit -- kill the calling thread itself. */
    rb_thread_t *th = GET_THREAD();
    return rb_thread_kill(th->self);
}
02202 
02203 
02204 /*
02205  *  call-seq:
02206  *     thr.wakeup   -> thr
02207  *
02208  *  Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
02209  *  I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
02210  *
02211  *     c = Thread.new { Thread.stop; puts "hey!" }
02212  *     sleep 0.1 while c.status!='sleep'
02213  *     c.wakeup
02214  *     c.join
02215  *
02216  *  <em>produces:</em>
02217  *
02218  *     hey!
02219  */
02220 
02221 VALUE
02222 rb_thread_wakeup(VALUE thread)
02223 {
02224     if (!RTEST(rb_thread_wakeup_alive(thread))) {
02225         rb_raise(rb_eThreadError, "killed thread");
02226     }
02227     return thread;
02228 }
02229 
VALUE
rb_thread_wakeup_alive(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    /* Cannot wake a dead thread; signal that to the caller with nil. */
    if (th->status == THREAD_KILLED) {
        return Qnil;
    }
    rb_threadptr_ready(th);
    /* A sleeping thread becomes runnable; a running one is untouched. */
    if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
        th->status = THREAD_RUNNABLE;
    return thread;
}
02244 
02245 
02246 /*
02247  *  call-seq:
02248  *     thr.run   -> thr
02249  *
02250  *  Wakes up <i>thr</i>, making it eligible for scheduling.
02251  *
02252  *     a = Thread.new { puts "a"; Thread.stop; puts "c" }
02253  *     sleep 0.1 while a.status!='sleep'
02254  *     puts "Got here"
02255  *     a.run
02256  *     a.join
02257  *
02258  *  <em>produces:</em>
02259  *
02260  *     a
02261  *     Got here
02262  *     c
02263  */
02264 
VALUE
rb_thread_run(VALUE thread)
{
    /* Thread#run -- wake the thread, then yield the CPU so it gets a
     * chance to actually execute. */
    rb_thread_wakeup(thread);
    rb_thread_schedule();
    return thread;
}
02272 
02273 
02274 /*
02275  *  call-seq:
02276  *     Thread.stop   -> nil
02277  *
02278  *  Stops execution of the current thread, putting it into a ``sleep'' state,
02279  *  and schedules execution of another thread.
02280  *
02281  *     a = Thread.new { print "a"; Thread.stop; print "c" }
02282  *     sleep 0.1 while a.status!='sleep'
02283  *     print "b"
02284  *     a.run
02285  *     a.join
02286  *
02287  *  <em>produces:</em>
02288  *
02289  *     abc
02290  */
02291 
VALUE
rb_thread_stop(void)
{
    /* Refuse to stop when no other thread exists that could wake us. */
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    }
    rb_thread_sleep_deadly();
    return Qnil;
}
02302 
static int
thread_list_i(st_data_t key, st_data_t val, void *data)
{
    /* st_foreach callback: collect runnable and sleeping threads into
     * the result array; killed/other states are skipped. */
    VALUE ary = (VALUE)data;
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);

    switch (th->status) {
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_STOPPED_FOREVER:
        rb_ary_push(ary, th->self);
        /* fallthrough */
      default:
        break;
    }
    return ST_CONTINUE;
}
02320 
02321 /********************************************************************/
02322 
02323 /*
02324  *  call-seq:
02325  *     Thread.list   -> array
02326  *
02327  *  Returns an array of <code>Thread</code> objects for all threads that are
02328  *  either runnable or stopped.
02329  *
02330  *     Thread.new { sleep(200) }
02331  *     Thread.new { 1000000.times {|i| i*i } }
02332  *     Thread.new { Thread.stop }
02333  *     Thread.list.each {|t| p t}
02334  *
02335  *  <em>produces:</em>
02336  *
02337  *     #<Thread:0x401b3e84 sleep>
02338  *     #<Thread:0x401b3f38 run>
02339  *     #<Thread:0x401b3fb0 sleep>
02340  *     #<Thread:0x401bdf4c run>
02341  */
02342 
VALUE
rb_thread_list(void)
{
    /* Build an Array of all runnable or stopped threads in this VM. */
    VALUE ary = rb_ary_new();
    st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
    return ary;
}
02350 
VALUE
rb_thread_current(void)
{
    /* The Thread object for the currently executing thread. */
    return GET_THREAD()->self;
}
02356 
02357 /*
02358  *  call-seq:
02359  *     Thread.current   -> thread
02360  *
02361  *  Returns the currently executing thread.
02362  *
02363  *     Thread.current   #=> #<Thread:0x401bdf4c run>
02364  */
02365 
static VALUE
thread_s_current(VALUE klass)
{
    /* Thread.current -- klass is ignored. */
    return rb_thread_current();
}
02371 
VALUE
rb_thread_main(void)
{
    /* The Thread object for the VM's main thread. */
    return GET_THREAD()->vm->main_thread->self;
}
02377 
02378 /*
02379  *  call-seq:
02380  *     Thread.main   -> thread
02381  *
02382  *  Returns the main thread.
02383  */
02384 
static VALUE
rb_thread_s_main(VALUE klass)
{
    /* Thread.main -- klass is ignored. */
    return rb_thread_main();
}
02390 
02391 
02392 /*
02393  *  call-seq:
02394  *     Thread.abort_on_exception   -> true or false
02395  *
02396  *  Returns the status of the global ``abort on exception'' condition.  The
02397  *  default is <code>false</code>. When set to <code>true</code>, or if the
02398  *  global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
02399  *  command line option <code>-d</code> was specified) all threads will abort
02400  *  (the process will <code>exit(0)</code>) if an exception is raised in any
02401  *  thread. See also <code>Thread::abort_on_exception=</code>.
02402  */
02403 
static VALUE
rb_thread_s_abort_exc(void)
{
    /* VM-global abort_on_exception flag as a Ruby boolean. */
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
}
02409 
02410 
02411 /*
02412  *  call-seq:
02413  *     Thread.abort_on_exception= boolean   -> true or false
02414  *
02415  *  When set to <code>true</code>, all threads will abort if an exception is
02416  *  raised. Returns the new state.
02417  *
02418  *     Thread.abort_on_exception = true
02419  *     t1 = Thread.new do
02420  *       puts  "In new thread"
02421  *       raise "Exception from thread"
02422  *     end
02423  *     sleep(1)
02424  *     puts "not reached"
02425  *
02426  *  <em>produces:</em>
02427  *
02428  *     In new thread
02429  *     prog.rb:4: Exception from thread (RuntimeError)
02430  *      from prog.rb:2:in `initialize'
02431  *      from prog.rb:2:in `new'
02432  *      from prog.rb:2
02433  */
02434 
static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    /* Changing the global flag is restricted at high $SAFE levels. */
    rb_secure(4);
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
    return val;
}
02442 
02443 
02444 /*
02445  *  call-seq:
02446  *     thr.abort_on_exception   -> true or false
02447  *
02448  *  Returns the status of the thread-local ``abort on exception'' condition for
02449  *  <i>thr</i>. The default is <code>false</code>. See also
02450  *  <code>Thread::abort_on_exception=</code>.
02451  */
02452 
static VALUE
rb_thread_abort_exc(VALUE thread)
{
    /* Per-thread abort_on_exception flag as a Ruby boolean. */
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return th->abort_on_exception ? Qtrue : Qfalse;
}
02460 
02461 
02462 /*
02463  *  call-seq:
02464  *     thr.abort_on_exception= boolean   -> true or false
02465  *
02466  *  When set to <code>true</code>, causes all threads (including the main
02467  *  program) to abort if an exception is raised in <i>thr</i>. The process will
02468  *  effectively <code>exit(0)</code>.
02469  */
02470 
static VALUE
rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    rb_thread_t *th;
    /* Mutation is restricted at high $SAFE levels. */
    rb_secure(4);

    GetThreadPtr(thread, th);
    th->abort_on_exception = RTEST(val);
    return val;
}
02481 
02482 
02483 /*
02484  *  call-seq:
02485  *     thr.group   -> thgrp or nil
02486  *
02487  *  Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
02488  *  the thread is not a member of any group.
02489  *
02490  *     Thread.main.group   #=> #<ThreadGroup:0x4029d914>
02491  */
02492 
02493 VALUE
02494 rb_thread_group(VALUE thread)
02495 {
02496     rb_thread_t *th;
02497     VALUE group;
02498     GetThreadPtr(thread, th);
02499     group = th->thgroup;
02500 
02501     if (!group) {
02502         group = Qnil;
02503     }
02504     return group;
02505 }
02506 
02507 static const char *
02508 thread_status_name(rb_thread_t *th)
02509 {
02510     switch (th->status) {
02511       case THREAD_RUNNABLE:
02512         if (th->to_kill)
02513             return "aborting";
02514         else
02515             return "run";
02516       case THREAD_STOPPED:
02517       case THREAD_STOPPED_FOREVER:
02518         return "sleep";
02519       case THREAD_KILLED:
02520         return "dead";
02521       default:
02522         return "unknown";
02523     }
02524 }
02525 
static int
rb_threadptr_dead(rb_thread_t *th)
{
    /* Nonzero when th has terminated (for any reason). */
    return th->status == THREAD_KILLED;
}
02531 
02532 
02533 /*
02534  *  call-seq:
02535  *     thr.status   -> string, false or nil
02536  *
02537  *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
02538  *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
02539  *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
02540  *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
02541  *  terminated with an exception.
02542  *
02543  *     a = Thread.new { raise("die now") }
02544  *     b = Thread.new { Thread.stop }
02545  *     c = Thread.new { Thread.exit }
02546  *     d = Thread.new { sleep }
02547  *     d.kill                  #=> #<Thread:0x401b3678 aborting>
02548  *     a.status                #=> nil
02549  *     b.status                #=> "sleep"
02550  *     c.status                #=> false
02551  *     d.status                #=> "aborting"
02552  *     Thread.current.status   #=> "run"
02553  */
02554 
static VALUE
rb_thread_status(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_threadptr_dead(th)) {
        /* Died with an exception (errinfo holds the exception object)
         * -> nil; terminated normally or via an internal tag fixnum
         * -> false. */
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
            /* TODO */ ) {
            return Qnil;
        }
        return Qfalse;
    }
    return rb_str_new2(thread_status_name(th));
}
02570 
02571 
02572 /*
02573  *  call-seq:
02574  *     thr.alive?   -> true or false
02575  *
02576  *  Returns <code>true</code> if <i>thr</i> is running or sleeping.
02577  *
02578  *     thr = Thread.new { }
02579  *     thr.join                #=> #<Thread:0x401b3fb0 dead>
02580  *     Thread.current.alive?   #=> true
02581  *     thr.alive?              #=> false
02582  */
02583 
02584 static VALUE
02585 rb_thread_alive_p(VALUE thread)
02586 {
02587     rb_thread_t *th;
02588     GetThreadPtr(thread, th);
02589 
02590     if (rb_threadptr_dead(th))
02591         return Qfalse;
02592     return Qtrue;
02593 }
02594 
02595 /*
02596  *  call-seq:
02597  *     thr.stop?   -> true or false
02598  *
02599  *  Returns <code>true</code> if <i>thr</i> is dead or sleeping.
02600  *
02601  *     a = Thread.new { Thread.stop }
02602  *     b = Thread.current
02603  *     a.stop?   #=> true
02604  *     b.stop?   #=> false
02605  */
02606 
02607 static VALUE
02608 rb_thread_stop_p(VALUE thread)
02609 {
02610     rb_thread_t *th;
02611     GetThreadPtr(thread, th);
02612 
02613     if (rb_threadptr_dead(th))
02614         return Qtrue;
02615     if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
02616         return Qtrue;
02617     return Qfalse;
02618 }
02619 
02620 /*
02621  *  call-seq:
02622  *     thr.safe_level   -> integer
02623  *
02624  *  Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
02625  *  levels can help when implementing sandboxes which run insecure code.
02626  *
02627  *     thr = Thread.new { $SAFE = 3; sleep }
02628  *     Thread.current.safe_level   #=> 0
02629  *     thr.safe_level              #=> 3
02630  */
02631 
static VALUE
rb_thread_safe_level(VALUE thread)
{
    /* Per-thread $SAFE value as an Integer. */
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    return INT2NUM(th->safe_level);
}
02640 
02641 /*
02642  * call-seq:
02643  *   thr.inspect   -> string
02644  *
02645  * Dump the name, id, and status of _thr_ to a string.
02646  */
02647 
static VALUE
rb_thread_inspect(VALUE thread)
{
    /* Format "#<ClassName:0xADDR status>".  Taint/trust state of the
     * result follows the thread object itself (OBJ_INFECT). */
    const char *cname = rb_obj_classname(thread);
    rb_thread_t *th;
    const char *status;
    VALUE str;

    GetThreadPtr(thread, th);
    status = thread_status_name(th);
    str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
    OBJ_INFECT(str, thread);

    return str;
}
02663 
VALUE
rb_thread_local_aref(VALUE thread, ID id)
{
    /* Look up the fiber-local value id on thread; nil when unset.
     * Reading another thread's locals is forbidden at $SAFE >= 4. */
    rb_thread_t *th;
    st_data_t val;

    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    /* The storage table is created lazily; no table means no locals. */
    if (!th->local_storage) {
        return Qnil;
    }
    if (st_lookup(th->local_storage, id, &val)) {
        return (VALUE)val;
    }
    return Qnil;
}
02682 
02683 /*
02684  *  call-seq:
02685  *      thr[sym]   -> obj or nil
02686  *
02687  *  Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
 *  if not explicitly inside a Fiber), using either a symbol or a string name.
02689  *  If the specified variable does not exist, returns <code>nil</code>.
02690  *
02691  *     [
02692  *       Thread.new { Thread.current["name"] = "A" },
02693  *       Thread.new { Thread.current[:name]  = "B" },
02694  *       Thread.new { Thread.current["name"] = "C" }
02695  *     ].each do |th|
02696  *       th.join
02697  *       puts "#{th.inspect}: #{th[:name]}"
02698  *     end
02699  *
02700  *  <em>produces:</em>
02701  *
02702  *     #<Thread:0x00000002a54220 dead>: A
02703  *     #<Thread:0x00000002a541a8 dead>: B
02704  *     #<Thread:0x00000002a54130 dead>: C
02705  *
02706  *  Thread#[] and Thread#[]= are not thread-local but fiber-local.
02707  *  This confusion did not exist in Ruby 1.8 because
02708  *  fibers were only available since Ruby 1.9.
 *  Ruby 1.9 chose to make these methods fiber-local in order to preserve
 *  the following idiom for dynamic scope.
02711  *
02712  *    def meth(newvalue)
02713  *      begin
02714  *        oldvalue = Thread.current[:name]
02715  *        Thread.current[:name] = newvalue
02716  *        yield
02717  *      ensure
02718  *        Thread.current[:name] = oldvalue
02719  *      end
02720  *    end
02721  *
02722  *  The idiom may not work as dynamic scope if the methods are thread-local
02723  *  and a given block switches fiber.
02724  *
02725  *    f = Fiber.new {
02726  *      meth(1) {
02727  *        Fiber.yield
02728  *      }
02729  *    }
02730  *    meth(2) {
02731  *      f.resume
02732  *    }
02733  *    f.resume
02734  *    p Thread.current[:name]
02735  *    #=> nil if fiber-local
02736  *    #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
02737  *
02738  *  For thread-local variables, please see <code>Thread#thread_local_get</code>
02739  *  and <code>Thread#thread_local_set</code>.
02740  *
02741  */
02742 
static VALUE
rb_thread_aref(VALUE thread, VALUE id)
{
    /* Thread#[] -- id may be a Symbol or String; both intern to an ID. */
    return rb_thread_local_aref(thread, rb_to_id(id));
}
02748 
VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    /* Set (or, when val is nil, delete) the fiber-local id on thread. */
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }
    /* Create the storage table on first assignment. */
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
    }
    /* Assigning nil removes the entry rather than storing nil. */
    if (NIL_P(val)) {
        st_delete_wrap(th->local_storage, id);
        return Qnil;
    }
    st_insert(th->local_storage, id, val);
    return val;
}
02771 
02772 /*
02773  *  call-seq:
02774  *      thr[sym] = obj   -> obj
02775  *
02776  *  Attribute Assignment---Sets or creates the value of a fiber-local variable,
02777  *  using either a symbol or a string. See also <code>Thread#[]</code>.  For
02778  *  thread-local variables, please see <code>Thread#thread_variable_set</code>
02779  *  and <code>Thread#thread_variable_get</code>.
02780  */
02781 
static VALUE
rb_thread_aset(VALUE self, VALUE id, VALUE val)
{
    /* Thread#[]= -- id may be a Symbol or String. */
    return rb_thread_local_aset(self, rb_to_id(id), val);
}
02787 
02788 /*
02789  *  call-seq:
02790  *      thr.thread_variable_get(key)  -> obj or nil
02791  *
02792  *  Returns the value of a thread local variable that has been set.  Note that
02793  *  these are different than fiber local values.  For fiber local values,
02794  *  please see Thread#[] and Thread#[]=.
02795  *
02796  *  Thread local values are carried along with threads, and do not respect
02797  *  fibers.  For example:
02798  *
02799  *    Thread.new {
02800  *      Thread.current.thread_variable_set("foo", "bar") # set a thread local
02801  *      Thread.current["foo"] = "bar"                    # set a fiber local
02802  *
02803  *      Fiber.new {
02804  *        Fiber.yield [
02805  *          Thread.current.thread_variable_get("foo"), # get the thread local
02806  *          Thread.current["foo"],                     # get the fiber local
02807  *        ]
02808  *      }.resume
02809  *    }.join.value # => ['bar', nil]
02810  *
02811  *  The value "bar" is returned for the thread local, where nil is returned
02812  *  for the fiber local.  The fiber is executed in the same thread, so the
02813  *  thread local values are available.
02814  *
02815  *  See also Thread#[]
02816  */
02817 
02818 static VALUE
02819 rb_thread_variable_get(VALUE thread, VALUE id)
02820 {
02821     VALUE locals;
02822     rb_thread_t *th;
02823 
02824     GetThreadPtr(thread, th);
02825 
02826     if (rb_safe_level() >= 4 && th != GET_THREAD()) {
02827         rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
02828     }
02829 
02830     locals = rb_iv_get(thread, "locals");
02831     return rb_hash_aref(locals, ID2SYM(rb_to_id(id)));
02832 }
02833 
02834 /*
02835  *  call-seq:
02836  *      thr.thread_variable_set(key, value)
02837  *
02838  *  Sets a thread local with +key+ to +value+.  Note that these are local to
02839  *  threads, and not to fibers.  Please see Thread#thread_variable_get and
02840  *  Thread#[] for more information.
02841  */
02842 
static VALUE
rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
{
    /* Thread#thread_variable_set -- thread-locals live in a Hash kept in
     * the "locals" instance variable, independent of fiber-local
     * storage (th->local_storage). */
    VALUE locals;
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }

    locals = rb_iv_get(thread, "locals");
    return rb_hash_aset(locals, ID2SYM(rb_to_id(id)), val);
}
02861 
02862 /*
02863  *  call-seq:
02864  *     thr.key?(sym)   -> true or false
02865  *
02866  *  Returns <code>true</code> if the given string (or symbol) exists as a
02867  *  fiber-local variable.
02868  *
02869  *     me = Thread.current
02870  *     me[:oliver] = "a"
02871  *     me.key?(:oliver)    #=> true
02872  *     me.key?(:stanley)   #=> false
02873  */
02874 
02875 static VALUE
02876 rb_thread_key_p(VALUE self, VALUE key)
02877 {
02878     rb_thread_t *th;
02879     ID id = rb_to_id(key);
02880 
02881     GetThreadPtr(self, th);
02882 
02883     if (!th->local_storage) {
02884         return Qfalse;
02885     }
02886     if (st_lookup(th->local_storage, id, 0)) {
02887         return Qtrue;
02888     }
02889     return Qfalse;
02890 }
02891 
static int
thread_keys_i(ID key, VALUE value, VALUE ary)
{
    /* st_foreach callback: collect fiber-local keys as Symbols. */
    rb_ary_push(ary, ID2SYM(key));
    return ST_CONTINUE;
}
02898 
static int
vm_living_thread_num(rb_vm_t *vm)
{
    /* Number of entries in the VM's living-threads table. */
    return (int)vm->living_threads->num_entries;
}
02904 
int
rb_thread_alone(void)
{
    /* True when the calling thread is the only living thread.  Before
     * the living_threads table exists (early boot) we assume we are
     * alone. */
    int num = 1;
    if (GET_THREAD()->vm->living_threads) {
        num = vm_living_thread_num(GET_THREAD()->vm);
        thread_debug("rb_thread_alone: %d\n", num);
    }
    return num == 1;
}
02915 
02916 /*
02917  *  call-seq:
02918  *     thr.keys   -> array
02919  *
 *  Returns an array of the names of the fiber-local variables (as Symbols).
02921  *
02922  *     thr = Thread.new do
02923  *       Thread.current[:cat] = 'meow'
02924  *       Thread.current["dog"] = 'woof'
02925  *     end
02926  *     thr.join   #=> #<Thread:0x401b3f10 dead>
02927  *     thr.keys   #=> [:dog, :cat]
02928  */
02929 
static VALUE
rb_thread_keys(VALUE self)
{
    /* Thread#keys -- Symbols naming this thread's fiber-local variables;
     * empty Array when the lazy storage table was never created. */
    rb_thread_t *th;
    VALUE ary = rb_ary_new();
    GetThreadPtr(self, th);

    if (th->local_storage) {
        st_foreach(th->local_storage, thread_keys_i, ary);
    }
    return ary;
}
02942 
static int
keys_i(VALUE key, VALUE value, VALUE ary)
{
    /* rb_hash_foreach callback: collect the keys of the locals Hash. */
    rb_ary_push(ary, key);
    return ST_CONTINUE;
}
02949 
02950 /*
02951  *  call-seq:
02952  *     thr.thread_variables   -> array
02953  *
02954  *  Returns an array of the names of the thread-local variables (as Symbols).
02955  *
02956  *     thr = Thread.new do
02957  *       Thread.current.thread_variable_set(:cat, 'meow')
02958  *       Thread.current.thread_variable_set("dog", 'woof')
02959  *     end
02960  *     thr.join               #=> #<Thread:0x401b3f10 dead>
02961  *     thr.thread_variables   #=> [:dog, :cat]
02962  *
02963  *  Note that these are not fiber local variables.  Please see Thread#[] and
02964  *  Thread#thread_variable_get for more details.
02965  */
02966 
02967 static VALUE
02968 rb_thread_variables(VALUE thread)
02969 {
02970     VALUE locals;
02971     VALUE ary;
02972 
02973     locals = rb_iv_get(thread, "locals");
02974     ary = rb_ary_new();
02975     rb_hash_foreach(locals, keys_i, ary);
02976 
02977     return ary;
02978 }
02979 
02980 /*
02981  *  call-seq:
02982  *     thr.thread_variable?(key)   -> true or false
02983  *
02984  *  Returns <code>true</code> if the given string (or symbol) exists as a
02985  *  thread-local variable.
02986  *
02987  *     me = Thread.current
02988  *     me.thread_variable_set(:oliver, "a")
02989  *     me.thread_variable?(:oliver)    #=> true
02990  *     me.thread_variable?(:stanley)   #=> false
02991  *
02992  *  Note that these are not fiber local variables.  Please see Thread#[] and
02993  *  Thread#thread_variable_get for more details.
02994  */
02995 
02996 static VALUE
02997 rb_thread_variable_p(VALUE thread, VALUE key)
02998 {
02999     VALUE locals;
03000 
03001     locals = rb_iv_get(thread, "locals");
03002 
03003     if (!RHASH(locals)->ntbl)
03004         return Qfalse;
03005 
03006     if (st_lookup(RHASH(locals)->ntbl, ID2SYM(rb_to_id(key)), 0)) {
03007         return Qtrue;
03008     }
03009 
03010     return Qfalse;
03011 }
03012 
03013 /*
03014  *  call-seq:
03015  *     thr.priority   -> integer
03016  *
03017  *  Returns the priority of <i>thr</i>. Default is inherited from the
03018  *  current thread that created the new thread, or zero for the
03019  *  initial main thread; higher-priority thread will run more frequently
03020  *  than lower-priority threads (but lower-priority threads can also run).
03021  *
03022  *  This is just a hint for the Ruby thread scheduler.  It may be ignored on
03023  *  some platforms.
03024  *
03025  *     Thread.current.priority   #=> 0
03026  */
03027 
03028 static VALUE
03029 rb_thread_priority(VALUE thread)
03030 {
03031     rb_thread_t *th;
03032     GetThreadPtr(thread, th);
03033     return INT2NUM(th->priority);
03034 }
03035 
03036 
03037 /*
03038  *  call-seq:
03039  *     thr.priority= integer   -> thr
03040  *
03041  *  Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
03042  *  will run more frequently than lower-priority threads (but lower-priority
03043  *  threads can also run).
03044  *
03045  *  This is just a hint for the Ruby thread scheduler.  It may be ignored on
03046  *  some platforms.
03047  *
03048  *     count1 = count2 = 0
03049  *     a = Thread.new do
03050  *           loop { count1 += 1 }
03051  *         end
03052  *     a.priority = -1
03053  *
03054  *     b = Thread.new do
03055  *           loop { count2 += 1 }
03056  *         end
03057  *     b.priority = -2
03058  *     sleep 1   #=> 1
03059  *     count1    #=> 622504
03060  *     count2    #=> 5832
03061  */
03062 
/* Thread#priority= : store the scheduler hint on the thread struct.
 * With native priorities the value is passed through to the OS scheduler;
 * otherwise it is clamped into [RUBY_THREAD_PRIORITY_MIN, MAX] and only
 * consulted by Ruby's own time-slice logic.  Returns the stored value. */
static VALUE
rb_thread_priority_set(VALUE thread, VALUE prio)
{
    rb_thread_t *th;
    int priority;
    GetThreadPtr(thread, th);

    rb_secure(4);

#if USE_NATIVE_THREAD_PRIORITY
    th->priority = NUM2INT(prio);
    native_thread_apply_priority(th);
#else
    /* clamp into the range the green scheduler understands */
    priority = NUM2INT(prio);
    if (priority > RUBY_THREAD_PRIORITY_MAX) {
        priority = RUBY_THREAD_PRIORITY_MAX;
    }
    else if (priority < RUBY_THREAD_PRIORITY_MIN) {
        priority = RUBY_THREAD_PRIORITY_MIN;
    }
    th->priority = priority;
#endif
    /* return what was actually stored (possibly clamped) */
    return INT2NUM(th->priority);
}
03087 
03088 /* for IO */
03089 
03090 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
03091 
03092 /*
03093  * several Unix platforms support file descriptors bigger than FD_SETSIZE
03094  * in select(2) system call.
03095  *
03096  * - Linux 2.2.12 (?)
03097  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
03098  *   select(2) documents how to allocate fd_set dynamically.
03099  *   http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
03100  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
03101  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
03102  *   select(2) documents how to allocate fd_set dynamically.
03103  *   http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
03104  * - HP-UX documents how to allocate fd_set dynamically.
03105  *   http://docs.hp.com/en/B2355-60105/select.2.html
03106  * - Solaris 8 has select_large_fdset
03107  * - Mac OS X 10.7 (Lion)
03108  *   select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
03109  *   _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
03110  *   http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
03111  *
03112  * When fd_set is not big enough to hold big file descriptors,
03113  * it should be allocated dynamically.
03114  * Note that this assumes fd_set is structured as bitmap.
03115  *
03116  * rb_fd_init allocates the memory.
03117  * rb_fd_term frees the memory.
03118  * rb_fd_set may re-allocate the bitmap.
03119  *
03120  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
03121  */
03122 
03123 void
03124 rb_fd_init(rb_fdset_t *fds)
03125 {
03126     fds->maxfd = 0;
03127     fds->fdset = ALLOC(fd_set);
03128     FD_ZERO(fds->fdset);
03129 }
03130 
03131 void
03132 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
03133 {
03134     size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
03135 
03136     if (size < sizeof(fd_set))
03137         size = sizeof(fd_set);
03138     dst->maxfd = src->maxfd;
03139     dst->fdset = xmalloc(size);
03140     memcpy(dst->fdset, src->fdset, size);
03141 }
03142 
03143 void
03144 rb_fd_term(rb_fdset_t *fds)
03145 {
03146     if (fds->fdset) xfree(fds->fdset);
03147     fds->maxfd = 0;
03148     fds->fdset = 0;
03149 }
03150 
03151 void
03152 rb_fd_zero(rb_fdset_t *fds)
03153 {
03154     if (fds->fdset)
03155         MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
03156 }
03157 
/* Grow fds so that file descriptor n fits in the bitmap.
 * The buffer is widened (and the fresh tail zeroed) only when the
 * rounded-up byte size actually increases; maxfd is bumped to cover n
 * either way. */
static void
rb_fd_resize(int n, rb_fdset_t *fds)
{
    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask); /* bytes needed */
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask); /* bytes held */

    /* the buffer is never smaller than a plain fd_set */
    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    if (m > o) {
        fds->fdset = xrealloc(fds->fdset, m);
        memset((char *)fds->fdset + o, 0, m - o); /* zero the new tail */
    }
    if (n >= fds->maxfd) fds->maxfd = n + 1;
}
03173 
03174 void
03175 rb_fd_set(int n, rb_fdset_t *fds)
03176 {
03177     rb_fd_resize(n, fds);
03178     FD_SET(n, fds->fdset);
03179 }
03180 
03181 void
03182 rb_fd_clr(int n, rb_fdset_t *fds)
03183 {
03184     if (n >= fds->maxfd) return;
03185     FD_CLR(n, fds->fdset);
03186 }
03187 
03188 int
03189 rb_fd_isset(int n, const rb_fdset_t *fds)
03190 {
03191     if (n >= fds->maxfd) return 0;
03192     return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
03193 }
03194 
03195 void
03196 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
03197 {
03198     size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
03199 
03200     if (size < sizeof(fd_set)) size = sizeof(fd_set);
03201     dst->maxfd = max;
03202     dst->fdset = xrealloc(dst->fdset, size);
03203     memcpy(dst->fdset, src, size);
03204 }
03205 
/* Export src back into a fixed-size fd_set (used after select(2)).
 * Raises ArgumentError if the dynamic set outgrew what a plain fd_set
 * can hold. */
static void
rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
{
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size > sizeof(fd_set)) {
        rb_raise(rb_eArgError, "too large fdsets");
    }
    /* copy a full fd_set; the source buffer is always at least that big */
    memcpy(dst, rb_fd_ptr(src), sizeof(fd_set));
}
03216 
03217 void
03218 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
03219 {
03220     size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
03221 
03222     if (size < sizeof(fd_set))
03223         size = sizeof(fd_set);
03224     dst->maxfd = src->maxfd;
03225     dst->fdset = xrealloc(dst->fdset, size);
03226     memcpy(dst->fdset, src->fdset, size);
03227 }
03228 
03229 #ifdef __native_client__
03230 int select(int nfds, fd_set *readfds, fd_set *writefds,
03231            fd_set *exceptfds, struct timeval *timeout);
03232 #endif
03233 
03234 int
03235 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
03236 {
03237     fd_set *r = NULL, *w = NULL, *e = NULL;
03238     if (readfds) {
03239         rb_fd_resize(n - 1, readfds);
03240         r = rb_fd_ptr(readfds);
03241     }
03242     if (writefds) {
03243         rb_fd_resize(n - 1, writefds);
03244         w = rb_fd_ptr(writefds);
03245     }
03246     if (exceptfds) {
03247         rb_fd_resize(n - 1, exceptfds);
03248         e = rb_fd_ptr(exceptfds);
03249     }
03250     return select(n, r, w, e, timeout);
03251 }
03252 
03253 #undef FD_ZERO
03254 #undef FD_SET
03255 #undef FD_CLR
03256 #undef FD_ISSET
03257 
03258 #define FD_ZERO(f)      rb_fd_zero(f)
03259 #define FD_SET(i, f)    rb_fd_set((i), (f))
03260 #define FD_CLR(i, f)    rb_fd_clr((i), (f))
03261 #define FD_ISSET(i, f)  rb_fd_isset((i), (f))
03262 
03263 #elif defined(_WIN32)
03264 
03265 void
03266 rb_fd_init(rb_fdset_t *set)
03267 {
03268     set->capa = FD_SETSIZE;
03269     set->fdset = ALLOC(fd_set);
03270     FD_ZERO(set->fdset);
03271 }
03272 
03273 void
03274 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
03275 {
03276     rb_fd_init(dst);
03277     rb_fd_dup(dst, src);
03278 }
03279 
03280 static void
03281 rb_fd_rcopy(fd_set *dst, rb_fdset_t *src)
03282 {
03283     int max = rb_fd_max(src);
03284 
03285     /* we assume src is the result of select() with dst, so dst should be
03286      * larger or equal than src. */
03287     if (max > FD_SETSIZE || (UINT)max > dst->fd_count) {
03288         rb_raise(rb_eArgError, "too large fdsets");
03289     }
03290 
03291     memcpy(dst->fd_array, src->fdset->fd_array, max);
03292     dst->fd_count = max;
03293 }
03294 
03295 void
03296 rb_fd_term(rb_fdset_t *set)
03297 {
03298     xfree(set->fdset);
03299     set->fdset = NULL;
03300     set->capa = 0;
03301 }
03302 
/* Add fd's underlying SOCKET to the set (Winsock fd_sets are arrays of
 * SOCKET handles, not bitmaps).  Idempotent: an already-present socket
 * is not added twice.  The array is grown in FD_SETSIZE-sized steps. */
void
rb_fd_set(int fd, rb_fdset_t *set)
{
    unsigned int i;
    SOCKET s = rb_w32_get_osfhandle(fd);

    /* already a member? */
    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
            return;
        }
    }
    /* grow capacity to the next multiple of FD_SETSIZE when full */
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;
}
03320 
03321 #undef FD_ZERO
03322 #undef FD_SET
03323 #undef FD_CLR
03324 #undef FD_ISSET
03325 
03326 #define FD_ZERO(f)      rb_fd_zero(f)
03327 #define FD_SET(i, f)    rb_fd_set((i), (f))
03328 #define FD_CLR(i, f)    rb_fd_clr((i), (f))
03329 #define FD_ISSET(i, f)  rb_fd_isset((i), (f))
03330 
03331 #else
03332 #define rb_fd_rcopy(d, s) (*(d) = *(s))
03333 #endif
03334 
/*
 * Core of thread-aware select(2): releases the GVL around the blocking
 * call and retries on EINTR/ERESTART.  Because select mutates its sets,
 * pristine copies are kept and restored before each retry, and the
 * remaining timeout is recomputed from an absolute deadline so retries
 * never extend the total wait.
 */
static int
do_select(int n, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
          struct timeval *timeout)
{
    int UNINITIALIZED_VAR(result);
    int lerrno;
    rb_fdset_t UNINITIALIZED_VAR(orig_read);
    rb_fdset_t UNINITIALIZED_VAR(orig_write);
    rb_fdset_t UNINITIALIZED_VAR(orig_except);
    double limit = 0;
    struct timeval wait_rest;
    rb_thread_t *th = GET_THREAD();

    if (timeout) {
        /* absolute wall-clock deadline, used to shrink retry timeouts */
        limit = timeofday();
        limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
        wait_rest = *timeout;
        timeout = &wait_rest;
    }

    /* snapshot the input sets so they can be restored on retry */
    if (read)
        rb_fd_init_copy(&orig_read, read);
    if (write)
        rb_fd_init_copy(&orig_write, write);
    if (except)
        rb_fd_init_copy(&orig_except, except);

  retry:
    lerrno = 0;

    /* GVL is released here; ubf_select is the unblocking function */
    BLOCKING_REGION({
            result = native_fd_select(n, read, write, except, timeout, th);
            if (result < 0) lerrno = errno;
        }, ubf_select, th, FALSE);

    /* NOTE(review): this may raise; the orig_* copies would then not be
     * freed below — confirm whether callers rely on GC/raise semantics */
    RUBY_VM_CHECK_INTS_BLOCKING(th);

    errno = lerrno;

    if (result < 0) {
        switch (errno) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            /* restore the sets select clobbered, then try again */
            if (read)
                rb_fd_dup(read, &orig_read);
            if (write)
                rb_fd_dup(write, &orig_write);
            if (except)
                rb_fd_dup(except, &orig_except);

            if (timeout) {
                /* time remaining until the original deadline */
                double d = limit - timeofday();

                wait_rest.tv_sec = (time_t)d;
                wait_rest.tv_usec = (int)((d-(double)wait_rest.tv_sec)*1e6);
                if (wait_rest.tv_sec < 0)  wait_rest.tv_sec = 0;
                if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
            }

            goto retry;
          default:
            break;
        }
    }

    /* release the snapshots */
    if (read)
        rb_fd_term(&orig_read);
    if (write)
        rb_fd_term(&orig_write);
    if (except)
        rb_fd_term(&orig_except);

    return result;
}
03411 
03412 static void
03413 rb_thread_wait_fd_rw(int fd, int read)
03414 {
03415     int result = 0;
03416     int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
03417 
03418     thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
03419 
03420     if (fd < 0) {
03421         rb_raise(rb_eIOError, "closed stream");
03422     }
03423 
03424     result = rb_wait_for_single_fd(fd, events, NULL);
03425     if (result < 0) {
03426         rb_sys_fail(0);
03427     }
03428 
03429     thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
03430 }
03431 
/* Public API: block the current thread until fd becomes readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}
03437 
/* Public API: block the current thread until fd becomes writable.
 * Always returns TRUE; failures raise inside rb_thread_wait_fd_rw. */
int
rb_thread_fd_writable(int fd)
{
    rb_thread_wait_fd_rw(fd, 0);
    return TRUE;
}
03444 
/* Thread-aware select(2) for callers holding plain fd_sets.
 * Each input set is imported into a dynamic rb_fdset_t, the real wait is
 * delegated to rb_thread_fd_select, and the (mutated) results are copied
 * back out.  Every rb_fd_init is paired with an rb_fd_term below. */
int
rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
                 struct timeval *timeout)
{
    rb_fdset_t fdsets[3];
    rb_fdset_t *rfds = NULL;
    rb_fdset_t *wfds = NULL;
    rb_fdset_t *efds = NULL;
    int retval;

    /* import the caller's fixed-size sets */
    if (read) {
        rfds = &fdsets[0];
        rb_fd_init(rfds);
        rb_fd_copy(rfds, read, max);
    }
    if (write) {
        wfds = &fdsets[1];
        rb_fd_init(wfds);
        rb_fd_copy(wfds, write, max);
    }
    if (except) {
        efds = &fdsets[2];
        rb_fd_init(efds);
        rb_fd_copy(efds, except, max);
    }

    retval = rb_thread_fd_select(max, rfds, wfds, efds, timeout);

    /* export results and free the dynamic sets */
    if (rfds) {
        rb_fd_rcopy(read, rfds);
        rb_fd_term(rfds);
    }
    if (wfds) {
        rb_fd_rcopy(write, wfds);
        rb_fd_term(wfds);
    }
    if (efds) {
        rb_fd_rcopy(except, efds);
        rb_fd_term(efds);
    }

    return retval;
}
03488 
03489 int
03490 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
03491                     struct timeval *timeout)
03492 {
03493     if (!read && !write && !except) {
03494         if (!timeout) {
03495             rb_thread_sleep_forever();
03496             return 0;
03497         }
03498         rb_thread_wait_for(*timeout);
03499         return 0;
03500     }
03501 
03502     if (read) {
03503         rb_fd_resize(max - 1, read);
03504     }
03505     if (write) {
03506         rb_fd_resize(max - 1, write);
03507     }
03508     if (except) {
03509         rb_fd_resize(max - 1, except);
03510     }
03511     return do_select(max, read, write, except, timeout);
03512 }
03513 
03514 /*
03515  * poll() is supported by many OSes, but so far Linux is the only
03516  * one we know of that supports using poll() in all places select()
03517  * would work.
03518  */
03519 #if defined(HAVE_POLL) && defined(__linux__)
03520 #  define USE_POLL
03521 #endif
03522 
03523 #ifdef USE_POLL
03524 
03525 /* The same with linux kernel. TODO: make platform independent definition. */
03526 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
03527 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
03528 #define POLLEX_SET (POLLPRI)
03529 
03530 #ifndef HAVE_PPOLL
/* Fallback ppoll(3) emulation built on poll(2).
 * TODO: don't ignore sigmask */
int
ppoll(struct pollfd *fds, nfds_t nfds,
      const struct timespec *ts, const sigset_t *sigmask)
{
    /* poll(2) takes its timeout as a signed int of milliseconds.  Convert
     * the timespec, clamping to INT_MAX instead of overflowing: the old
     * code compared tv_sec against TIMET_MAX/1000, so on 64-bit time_t a
     * tv_sec between INT_MAX/1000 and TIMET_MAX/1000 overflowed the int
     * arithmetic (undefined behavior) and could yield a bogus timeout. */
    int timeout_ms;

    if (ts) {
        int tmp, tmp2;

        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = INT_MAX;
        else {
            tmp = (int)(ts->tv_sec * 1000);
            tmp2 = (int)(ts->tv_nsec / (1000 * 1000));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = INT_MAX;
            else
                timeout_ms = tmp + tmp2;
        }
    }
    else
        timeout_ms = -1;        /* NULL timespec: wait indefinitely */

    return poll(fds, nfds, timeout_ms);
}
03557 #endif
03558 
03559 /*
03560  * returns a mask of events
03561  */
/* poll(2)-based single-fd wait.  Releases the GVL around ppoll, retries
 * on EINTR/ERESTART with the timeout recomputed from an absolute
 * deadline, and maps poll revents back to RB_WAITFD_* bits. */
int
rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
{
    struct pollfd fds;
    int result = 0, lerrno;
    double limit = 0;
    struct timespec ts;
    struct timespec *timeout = NULL;
    rb_thread_t *th = GET_THREAD();

    if (tv) {
        /* convert the timeval and remember the wall-clock deadline */
        ts.tv_sec = tv->tv_sec;
        ts.tv_nsec = tv->tv_usec * 1000;
        limit = timeofday();
        limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
        timeout = &ts;
    }

    fds.fd = fd;
    fds.events = (short)events;

retry:
    lerrno = 0;
    /* GVL released around the blocking poll; ubf_select wakes it */
    BLOCKING_REGION({
        result = ppoll(&fds, 1, timeout, NULL);
        if (result < 0) lerrno = errno;
    }, ubf_select, th, FALSE);

    RUBY_VM_CHECK_INTS_BLOCKING(th);

    if (result < 0) {
        errno = lerrno;
        switch (errno) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            if (timeout) {
                /* shrink the timeout to the time left until the deadline */
                double d = limit - timeofday();

                ts.tv_sec = (long)d;
                ts.tv_nsec = (long)((d - (double)ts.tv_sec) * 1e9);
                if (ts.tv_sec < 0)
                    ts.tv_sec = 0;
                if (ts.tv_nsec < 0)
                    ts.tv_nsec = 0;
            }
            goto retry;
        }
        return -1;
    }

    /* invalid descriptor: report as EBADF like select(2) would */
    if (fds.revents & POLLNVAL) {
        errno = EBADF;
        return -1;
    }

    /*
     * POLLIN/POLLOUT have different meanings from select(2)'s read/write
     * bits, so translate revents into the RB_WAITFD_* event mask.
     */
    result = 0;
    if (fds.revents & POLLIN_SET)
        result |= RB_WAITFD_IN;
    if (fds.revents & POLLOUT_SET)
        result |= RB_WAITFD_OUT;
    if (fds.revents & POLLEX_SET)
        result |= RB_WAITFD_PRI;

    return result;
}
03633 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
03634 static rb_fdset_t *
03635 init_set_fd(int fd, rb_fdset_t *fds)
03636 {
03637     rb_fd_init(fds);
03638     rb_fd_set(fd, fds);
03639 
03640     return fds;
03641 }
03642 
/* Arguments for the select()-based rb_wait_for_single_fd emulation.
 * `as` is a union: the descriptor on the way in, the saved errno on
 * failure (the fd is no longer needed once the call has failed). */
struct select_args {
    union {
        int fd;
        int error;
    } as;
    rb_fdset_t *read;     /* non-NULL iff RB_WAITFD_IN requested */
    rb_fdset_t *write;    /* non-NULL iff RB_WAITFD_OUT requested */
    rb_fdset_t *except;   /* non-NULL iff RB_WAITFD_PRI requested */
    struct timeval *tv;
};
03653 
/* rb_ensure body: run rb_thread_fd_select for the single descriptor and
 * translate the outcome into an RB_WAITFD_* event mask (or -1). */
static VALUE
select_single(VALUE ptr)
{
    struct select_args *args = (struct select_args *)ptr;
    int r;

    r = rb_thread_fd_select(args->as.fd + 1,
                            args->read, args->write, args->except, args->tv);
    if (r == -1)
        args->as.error = errno; /* overwrites as.fd — fd unused on error */
    if (r > 0) {
        r = 0;
        if (args->read && rb_fd_isset(args->as.fd, args->read))
            r |= RB_WAITFD_IN;
        if (args->write && rb_fd_isset(args->as.fd, args->write))
            r |= RB_WAITFD_OUT;
        if (args->except && rb_fd_isset(args->as.fd, args->except))
            r |= RB_WAITFD_PRI;
    }
    return (VALUE)r;
}
03675 
03676 static VALUE
03677 select_single_cleanup(VALUE ptr)
03678 {
03679     struct select_args *args = (struct select_args *)ptr;
03680 
03681     if (args->read) rb_fd_term(args->read);
03682     if (args->write) rb_fd_term(args->write);
03683     if (args->except) rb_fd_term(args->except);
03684 
03685     return (VALUE)-1;
03686 }
03687 
/* select()-based single-fd wait (non-Linux fallback).
 * Builds a one-element set per requested event, runs the select through
 * rb_ensure so the sets are always freed, and restores errno from the
 * union on failure. */
int
rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
{
    rb_fdset_t rfds, wfds, efds;
    struct select_args args;
    int r;
    VALUE ptr = (VALUE)&args;

    args.as.fd = fd;
    args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
    args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
    args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
    args.tv = tv;

    r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
    if (r == -1)
        errno = args.as.error; /* stashed by select_single */

    return r;
}
03708 #endif /* ! USE_POLL */
03709 
03710 /*
03711  * for GC
03712  */
03713 
03714 #ifdef USE_CONSERVATIVE_STACK_END
/* Record the current native stack position for conservative GC marking.
 * Taking the address of a local deliberately yields (approximately) the
 * current stack end; callers only use it as a boundary for scanning,
 * never dereference it after this frame returns. */
void
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    VALUE stack_end;
    *stack_end_p = &stack_end;
}
03721 #endif
03722 
03723 
03724 /*
03725  *
03726  */
03727 
/* If any signals are buffered, interrupt the main thread so it runs the
 * trap handlers.  mth must be the main thread. */
void
rb_threadptr_check_signal(rb_thread_t *mth)
{
    /* mth must be main_thread */
    if (rb_signal_buff_size() > 0) {
        /* wakeup main thread */
        rb_threadptr_trap_interrupt(mth);
    }
}
03737 
/* Periodic tick executed on the timer thread: requests a time-slice
 * interrupt on the running thread and forwards buffered signals to the
 * main thread. */
static void
timer_thread_function(void *arg)
{
    rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */

    /*
     * Tricky: thread_destruct_lock doesn't close a race against
     * vm->running_thread switch. however it guarantee th->running_thread
     * point to valid pointer or NULL.
     */
    native_mutex_lock(&vm->thread_destruct_lock);
    /* for time slice */
    if (vm->running_thread)
        RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
    native_mutex_unlock(&vm->thread_destruct_lock);

    /* check signal */
    rb_threadptr_check_signal(vm->main_thread);

#if 0
    /* prove profiler */
    if (vm->prove_profile.enable) {
        rb_thread_t *th = vm->running_thread;

        if (vm->during_gc) {
            /* GC prove profiling */
        }
    }
#endif
}
03768 
03769 void
03770 rb_thread_stop_timer_thread(int close_anyway)
03771 {
03772     if (timer_thread_id && native_stop_timer_thread(close_anyway)) {
03773         native_reset_timer_thread();
03774     }
03775 }
03776 
/* Public wrapper: reset the native timer-thread bookkeeping. */
void
rb_thread_reset_timer_thread(void)
{
    native_reset_timer_thread();
}
03782 
/* Start (or restart) the timer thread.  system_working is set first so
 * the new thread's loop condition is already true when it begins. */
void
rb_thread_start_timer_thread(void)
{
    system_working = 1;
    rb_thread_create_timer_thread();
}
03789 
03790 static int
03791 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
03792 {
03793     int i;
03794     VALUE lines = (VALUE)val;
03795 
03796     for (i = 0; i < RARRAY_LEN(lines); i++) {
03797         if (RARRAY_PTR(lines)[i] != Qnil) {
03798             RARRAY_PTR(lines)[i] = INT2FIX(0);
03799         }
03800     }
03801     return ST_CONTINUE;
03802 }
03803 
03804 static void
03805 clear_coverage(void)
03806 {
03807     VALUE coverages = rb_get_coverages();
03808     if (RTEST(coverages)) {
03809         st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
03810     }
03811 }
03812 
/* Shared post-fork fixup: the calling thread becomes the child's main
 * thread, every other living thread is handed to +atfork+ for cleanup,
 * and the living_threads table is rebuilt with just the survivor.
 * Statement order matters: the table is iterated before being cleared. */
static void
rb_thread_atfork_internal(int (*atfork)(st_data_t, st_data_t, st_data_t))
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    /* re-initialize the GVL for the single-threaded child */
    gvl_atfork(th->vm);
    st_foreach(vm->living_threads, atfork, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
    vm->sleeper = 0;
    clear_coverage();
}
03828 
03829 static int
03830 terminate_atfork_i(st_data_t key, st_data_t val, st_data_t current_th)
03831 {
03832     VALUE thval = key;
03833     rb_thread_t *th;
03834     GetThreadPtr(thval, th);
03835 
03836     if (th != (rb_thread_t *)current_th) {
03837         rb_mutex_abandon_keeping_mutexes(th);
03838         rb_mutex_abandon_locking_mutex(th);
03839         thread_cleanup_func(th, TRUE);
03840     }
03841     return ST_CONTINUE;
03842 }
03843 
/* Called in the child after fork(2): reap all other threads, drop stale
 * join waiters, and reseed the PRNG so the child doesn't share the
 * parent's random sequence. */
void
rb_thread_atfork(void)
{
    rb_thread_atfork_internal(terminate_atfork_i);
    GET_THREAD()->join_list = NULL;

    /* We don't want to reproduce CVE-2003-0900. */
    rb_reset_random_seed();
}
03853 
03854 static int
03855 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, st_data_t current_th)
03856 {
03857     VALUE thval = key;
03858     rb_thread_t *th;
03859     GetThreadPtr(thval, th);
03860 
03861     if (th != (rb_thread_t *)current_th) {
03862         thread_cleanup_func_before_exec(th);
03863     }
03864     return ST_CONTINUE;
03865 }
03866 
/* Post-fork fixup used when the child will immediately exec(2). */
void
rb_thread_atfork_before_exec(void)
{
    rb_thread_atfork_internal(terminate_atfork_before_exec_i);
}
03872 
/* Per-ThreadGroup state: `enclosed` blocks membership changes once set,
 * `group` back-references the wrapping ThreadGroup object. */
struct thgroup {
    int enclosed;
    VALUE group;
};
03877 
03878 static size_t
03879 thgroup_memsize(const void *ptr)
03880 {
03881     return ptr ? sizeof(struct thgroup) : 0;
03882 }
03883 
/* TypedData bindings for ThreadGroup: no mark function (the stored
 * `group` VALUE is the wrapper itself), default free, size via
 * thgroup_memsize. */
static const rb_data_type_t thgroup_data_type = {
    "thgroup",
    {NULL, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
};
03888 
03889 /*
03890  * Document-class: ThreadGroup
03891  *
03892  *  <code>ThreadGroup</code> provides a means of keeping track of a number of
03893  *  threads as a group. A <code>Thread</code> can belong to only one
03894  *  <code>ThreadGroup</code> at a time; adding a thread to a new group will
03895  *  remove it from any previous group.
03896  *
03897  *  Newly created threads belong to the same group as the thread from which they
03898  *  were created.
03899  */
03900 
03901 /*
03902  * Document-const: Default
03903  *
03904  *  The default ThreadGroup created when Ruby starts; all Threads belong to it
03905  *  by default.
03906  */
03907 static VALUE
03908 thgroup_s_alloc(VALUE klass)
03909 {
03910     VALUE group;
03911     struct thgroup *data;
03912 
03913     group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
03914     data->enclosed = 0;
03915     data->group = group;
03916 
03917     return group;
03918 }
03919 
/* Cursor passed through st_foreach while building ThreadGroup#list:
 * `ary` accumulates members, `group` is the group being matched. */
struct thgroup_list_params {
    VALUE ary;
    VALUE group;
};
03924 
03925 static int
03926 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
03927 {
03928     VALUE thread = (VALUE)key;
03929     VALUE ary = ((struct thgroup_list_params *)data)->ary;
03930     VALUE group = ((struct thgroup_list_params *)data)->group;
03931     rb_thread_t *th;
03932     GetThreadPtr(thread, th);
03933 
03934     if (th->thgroup == group) {
03935         rb_ary_push(ary, thread);
03936     }
03937     return ST_CONTINUE;
03938 }
03939 
03940 /*
03941  *  call-seq:
03942  *     thgrp.list   -> array
03943  *
03944  *  Returns an array of all existing <code>Thread</code> objects that belong to
03945  *  this group.
03946  *
03947  *     ThreadGroup::Default.list   #=> [#<Thread:0x401bdf4c run>]
03948  */
03949 
03950 static VALUE
03951 thgroup_list(VALUE group)
03952 {
03953     VALUE ary = rb_ary_new();
03954     struct thgroup_list_params param;
03955 
03956     param.ary = ary;
03957     param.group = group;
03958     st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
03959     return ary;
03960 }
03961 
03962 
03963 /*
03964  *  call-seq:
03965  *     thgrp.enclose   -> thgrp
03966  *
03967  *  Prevents threads from being added to or removed from the receiving
03968  *  <code>ThreadGroup</code>. New threads can still be started in an enclosed
03969  *  <code>ThreadGroup</code>.
03970  *
03971  *     ThreadGroup::Default.enclose        #=> #<ThreadGroup:0x4029d914>
03972  *     thr = Thread::new { Thread.stop }   #=> #<Thread:0x402a7210 sleep>
03973  *     tg = ThreadGroup::new               #=> #<ThreadGroup:0x402752d4>
03974  *     tg.add thr
03975  *
03976  *  <em>produces:</em>
03977  *
03978  *     ThreadError: can't move from the enclosed thread group
03979  */
03980 
03981 static VALUE
03982 thgroup_enclose(VALUE group)
03983 {
03984     struct thgroup *data;
03985 
03986     TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
03987     data->enclosed = 1;
03988 
03989     return group;
03990 }
03991 
03992 
03993 /*
03994  *  call-seq:
03995  *     thgrp.enclosed?   -> true or false
03996  *
03997  *  Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
03998  *  ThreadGroup#enclose.
03999  */
04000 
04001 static VALUE
04002 thgroup_enclosed_p(VALUE group)
04003 {
04004     struct thgroup *data;
04005 
04006     TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
04007     if (data->enclosed)
04008         return Qtrue;
04009     return Qfalse;
04010 }
04011 
04012 
04013 /*
04014  *  call-seq:
04015  *     thgrp.add(thread)   -> thgrp
04016  *
04017  *  Adds the given <em>thread</em> to this group, removing it from any other
04018  *  group to which it may have previously belonged.
04019  *
04020  *     puts "Initial group is #{ThreadGroup::Default.list}"
04021  *     tg = ThreadGroup.new
04022  *     t1 = Thread.new { sleep }
04023  *     t2 = Thread.new { sleep }
04024  *     puts "t1 is #{t1}"
04025  *     puts "t2 is #{t2}"
04026  *     tg.add(t1)
04027  *     puts "Initial group now #{ThreadGroup::Default.list}"
04028  *     puts "tg group now #{tg.list}"
04029  *
04030  *  <em>produces:</em>
04031  *
04032  *     Initial group is #<Thread:0x401bdf4c>
04033  *     t1 is #<Thread:0x401b3c90>
04034  *     t2 is #<Thread:0x401b3c18>
04035  *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
04036  *     tg group now #<Thread:0x401b3c90>
04037  */
04038 
static VALUE
thgroup_add(VALUE group, VALUE thread)
{
    rb_thread_t *th;
    struct thgroup *data;

    /* $SAFE >= 4 code may not reparent threads */
    rb_secure(4);
    GetThreadPtr(thread, th);

    /* destination group must be neither frozen nor enclosed */
    if (OBJ_FROZEN(group)) {
        rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    }
    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
    }

    /* thread without a group (e.g. during teardown): nothing to move */
    if (!th->thgroup) {
        return Qnil;
    }

    /* source group must likewise be neither frozen nor enclosed;
     * note that `data` is reused here to point at the *source* group */
    if (OBJ_FROZEN(th->thgroup)) {
        rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    }
    TypedData_Get_Struct(th->thgroup, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError,
                 "can't move from the enclosed thread group");
    }

    th->thgroup = group;
    return group;
}
04072 
04073 
04074 /*
04075  *  Document-class: Mutex
04076  *
04077  *  Mutex implements a simple semaphore that can be used to coordinate access to
04078  *  shared data from multiple concurrent threads.
04079  *
04080  *  Example:
04081  *
04082  *    require 'thread'
04083  *    semaphore = Mutex.new
04084  *
04085  *    a = Thread.new {
04086  *      semaphore.synchronize {
04087  *        # access shared resource
04088  *      }
04089  *    }
04090  *
04091  *    b = Thread.new {
04092  *      semaphore.synchronize {
04093  *        # access shared resource
04094  *      }
04095  *    }
04096  *
04097  */
04098 
04099 #define GetMutexPtr(obj, tobj) \
04100     TypedData_Get_Struct((obj), rb_mutex_t, &mutex_data_type, (tobj))
04101 
04102 #define mutex_mark NULL
04103 
04104 static void
04105 mutex_free(void *ptr)
04106 {
04107     if (ptr) {
04108         rb_mutex_t *mutex = ptr;
04109         if (mutex->th) {
04110             /* rb_warn("free locked mutex"); */
04111             const char *err = rb_mutex_unlock_th(mutex, mutex->th);
04112             if (err) rb_bug("%s", err);
04113         }
04114         native_mutex_destroy(&mutex->lock);
04115         native_cond_destroy(&mutex->cond);
04116     }
04117     ruby_xfree(ptr);
04118 }
04119 
04120 static size_t
04121 mutex_memsize(const void *ptr)
04122 {
04123     return ptr ? sizeof(rb_mutex_t) : 0;
04124 }
04125 
/* TypedData bookkeeping for Mutex: no mark function (a mutex references
 * no VALUEs), free/memsize callbacks defined above. */
static const rb_data_type_t mutex_data_type = {
    "mutex",
    {mutex_mark, mutex_free, mutex_memsize,},
};
04130 
04131 VALUE
04132 rb_obj_is_mutex(VALUE obj)
04133 {
04134     if (rb_typeddata_is_kind_of(obj, &mutex_data_type)) {
04135         return Qtrue;
04136     }
04137     else {
04138         return Qfalse;
04139     }
04140 }
04141 
/*
 * Allocates a Mutex instance and initializes its native lock and condvar.
 * NOTE(review): `obj` is declared volatile, presumably so the conservative
 * GC keeps the VALUE alive across the native init calls -- confirm.
 */
static VALUE
mutex_alloc(VALUE klass)
{
    VALUE volatile obj;
    rb_mutex_t *mutex;

    obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
    native_mutex_initialize(&mutex->lock);
    native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
    return obj;
}
04153 
04154 /*
04155  *  call-seq:
04156  *     Mutex.new   -> mutex
04157  *
04158  *  Creates a new Mutex
04159  */
/* Mutex#initialize body: all real setup happens in mutex_alloc(). */
static VALUE
mutex_initialize(VALUE self)
{
    return self;
}
04165 
/* C API: allocate a new Mutex (equivalent to Mutex.new). */
VALUE
rb_mutex_new(void)
{
    return mutex_alloc(rb_cMutex);
}
04171 
04172 /*
04173  * call-seq:
04174  *    mutex.locked?  -> true or false
04175  *
04176  * Returns +true+ if this lock is currently held by some thread.
04177  */
04178 VALUE
04179 rb_mutex_locked_p(VALUE self)
04180 {
04181     rb_mutex_t *mutex;
04182     GetMutexPtr(self, mutex);
04183     return mutex->th ? Qtrue : Qfalse;
04184 }
04185 
/*
 * Records that `self` (a Mutex just acquired by `th`) is held: pushes its
 * rb_mutex_t onto the th->keeping_mutexes singly-linked list so the lock
 * can be abandoned if the thread dies (see rb_mutex_abandon_*).
 */
static void
mutex_locked(rb_thread_t *th, VALUE self)
{
    rb_mutex_t *mutex;
    GetMutexPtr(self, mutex);

    if (th->keeping_mutexes) {
        mutex->next_mutex = th->keeping_mutexes;
    }
    th->keeping_mutexes = mutex;
}
04197 
04198 /*
04199  * call-seq:
04200  *    mutex.try_lock  -> true or false
04201  *
04202  * Attempts to obtain the lock and returns immediately. Returns +true+ if the
04203  * lock was granted.
04204  */
04205 VALUE
04206 rb_mutex_trylock(VALUE self)
04207 {
04208     rb_mutex_t *mutex;
04209     VALUE locked = Qfalse;
04210     GetMutexPtr(self, mutex);
04211 
04212     native_mutex_lock(&mutex->lock);
04213     if (mutex->th == 0) {
04214         mutex->th = GET_THREAD();
04215         locked = Qtrue;
04216 
04217         mutex_locked(GET_THREAD(), self);
04218     }
04219     native_mutex_unlock(&mutex->lock);
04220 
04221     return locked;
04222 }
04223 
/*
 * Blocks until `mutex` becomes free and claims it for `th`, waiting on
 * mutex->cond.  Called with mutex->lock held and the GVL released.
 * timeout_ms is non-zero only for the deadlock-patrol thread
 * (see rb_mutex_lock).
 * Returns 0 on success, 1 if interrupted, 2 if the patrol timeout fired.
 */
static int
lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
{
    int interrupted = 0;
    int err = 0;

    mutex->cond_waiting++;
    for (;;) {
        if (!mutex->th) {
            mutex->th = th;   /* mutex is free: take ownership */
            break;
        }
        if (RUBY_VM_INTERRUPTED(th)) {
            interrupted = 1;
            break;
        }
        /* the previous iteration's timedwait expired */
        if (err == ETIMEDOUT) {
            interrupted = 2;
            break;
        }

        if (timeout_ms) {
            struct timespec timeout_rel;
            struct timespec timeout;

            timeout_rel.tv_sec = 0;
            timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
            timeout = native_cond_timeout(&mutex->cond, timeout_rel);
            err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
        }
        else {
            native_cond_wait(&mutex->cond, &mutex->lock);
            err = 0;
        }
    }
    mutex->cond_waiting--;

    return interrupted;
}
04263 
04264 static void
04265 lock_interrupt(void *ptr)
04266 {
04267     rb_mutex_t *mutex = (rb_mutex_t *)ptr;
04268     native_mutex_lock(&mutex->lock);
04269     if (mutex->cond_waiting > 0)
04270         native_cond_broadcast(&mutex->cond);
04271     native_mutex_unlock(&mutex->lock);
04272 }
04273 
04274 /*
04275  * At maximum, only one thread can use cond_timedwait and watch deadlock
 * periodically. Multiple polling threads (i.e. concurrent deadlock checks)
 * introduce new race conditions. [Bug #6278] [ruby-core:44275]
04278  */
04279 static const rb_thread_t *patrol_thread = NULL;
04280 
04281 /*
04282  * call-seq:
04283  *    mutex.lock  -> self
04284  *
04285  * Attempts to grab the lock and waits if it isn't available.
04286  * Raises +ThreadError+ if +mutex+ was locked by the current thread.
04287  */
04288 VALUE
04289 rb_mutex_lock(VALUE self)
04290 {
04291     rb_thread_t *th = GET_THREAD();
04292     rb_mutex_t *mutex;
04293     GetMutexPtr(self, mutex);
04294 
04295     /* When running trap handler */
04296     if (!mutex->allow_trap && th->interrupt_mask & TRAP_INTERRUPT_MASK) {
04297         rb_raise(rb_eThreadError, "can't be called from trap context");
04298     }
04299 
04300     if (rb_mutex_trylock(self) == Qfalse) {
04301         if (mutex->th == GET_THREAD()) {
04302             rb_raise(rb_eThreadError, "deadlock; recursive locking");
04303         }
04304 
04305         while (mutex->th != th) {
04306             int interrupted;
04307             enum rb_thread_status prev_status = th->status;
04308             volatile int timeout_ms = 0;
04309             struct rb_unblock_callback oldubf;
04310 
04311             set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
04312             th->status = THREAD_STOPPED_FOREVER;
04313             th->locking_mutex = self;
04314 
04315             native_mutex_lock(&mutex->lock);
04316             th->vm->sleeper++;
04317             /*
04318              * Carefully! while some contended threads are in lock_func(),
04319              * vm->sleepr is unstable value. we have to avoid both deadlock
04320              * and busy loop.
04321              */
04322             if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
04323                 !patrol_thread) {
04324                 timeout_ms = 100;
04325                 patrol_thread = th;
04326             }
04327 
04328             GVL_UNLOCK_BEGIN();
04329             interrupted = lock_func(th, mutex, (int)timeout_ms);
04330             native_mutex_unlock(&mutex->lock);
04331             GVL_UNLOCK_END();
04332 
04333             if (patrol_thread == th)
04334                 patrol_thread = NULL;
04335 
04336             reset_unblock_function(th, &oldubf);
04337 
04338             th->locking_mutex = Qfalse;
04339             if (mutex->th && interrupted == 2) {
04340                 rb_check_deadlock(th->vm);
04341             }
04342             if (th->status == THREAD_STOPPED_FOREVER) {
04343                 th->status = prev_status;
04344             }
04345             th->vm->sleeper--;
04346 
04347             if (mutex->th == th) mutex_locked(th, self);
04348 
04349             if (interrupted) {
04350                 RUBY_VM_CHECK_INTS_BLOCKING(th);
04351             }
04352         }
04353     }
04354     return self;
04355 }
04356 
04357 /*
04358  * call-seq:
04359  *    mutex.owned?  -> true or false
04360  *
04361  * Returns +true+ if this lock is currently held by current thread.
04362  * <em>This API is experimental, and subject to change.</em>
04363  */
04364 VALUE
04365 rb_mutex_owned_p(VALUE self)
04366 {
04367     VALUE owned = Qfalse;
04368     rb_thread_t *th = GET_THREAD();
04369     rb_mutex_t *mutex;
04370 
04371     GetMutexPtr(self, mutex);
04372 
04373     if (mutex->th == th)
04374         owned = Qtrue;
04375 
04376     return owned;
04377 }
04378 
/*
 * Core of Mutex#unlock: releases `mutex` on behalf of `th`.
 * Returns NULL on success, otherwise a static error message the caller
 * turns into a ThreadError (or rb_bug, from mutex_free).
 */
static const char *
rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t volatile *th)
{
    const char *err = NULL;

    native_mutex_lock(&mutex->lock);

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != th) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
        mutex->th = 0;
        /* wake one contender blocked in lock_func() */
        if (mutex->cond_waiting > 0)
            native_cond_signal(&mutex->cond);
    }

    native_mutex_unlock(&mutex->lock);

    if (!err) {
        /* unlink `mutex` from th->keeping_mutexes (singly linked list);
         * the unguarded loop is safe because we just verified above that
         * this thread owns the mutex, so it must be on the list */
        rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
        while (*th_mutex != mutex) {
            th_mutex = &(*th_mutex)->next_mutex;
        }
        *th_mutex = mutex->next_mutex;
        mutex->next_mutex = NULL;
    }

    return err;
}
04411 
04412 /*
04413  * call-seq:
04414  *    mutex.unlock    -> self
04415  *
04416  * Releases the lock.
04417  * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
04418  */
04419 VALUE
04420 rb_mutex_unlock(VALUE self)
04421 {
04422     const char *err;
04423     rb_mutex_t *mutex;
04424     GetMutexPtr(self, mutex);
04425 
04426     err = rb_mutex_unlock_th(mutex, GET_THREAD());
04427     if (err) rb_raise(rb_eThreadError, "%s", err);
04428 
04429     return self;
04430 }
04431 
04432 static void
04433 rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
04434 {
04435     if (th->keeping_mutexes) {
04436         rb_mutex_abandon_all(th->keeping_mutexes);
04437     }
04438     th->keeping_mutexes = NULL;
04439 }
04440 
04441 static void
04442 rb_mutex_abandon_locking_mutex(rb_thread_t *th)
04443 {
04444     rb_mutex_t *mutex;
04445 
04446     if (!th->locking_mutex) return;
04447 
04448     GetMutexPtr(th->locking_mutex, mutex);
04449     if (mutex->th == th)
04450         rb_mutex_abandon_all(mutex);
04451     th->locking_mutex = Qfalse;
04452 }
04453 
04454 static void
04455 rb_mutex_abandon_all(rb_mutex_t *mutexes)
04456 {
04457     rb_mutex_t *mutex;
04458 
04459     while (mutexes) {
04460         mutex = mutexes;
04461         mutexes = mutex->next_mutex;
04462         mutex->th = 0;
04463         mutex->next_mutex = 0;
04464     }
04465 }
04466 
/* rb_ensure body for Mutex#sleep without a timeout: sleep until woken.
 * `time` is unused; the signature is dictated by rb_ensure. */
static VALUE
rb_mutex_sleep_forever(VALUE time)
{
    sleep_forever(GET_THREAD(), 1, 0); /* permit spurious check */
    return Qnil;
}
04473 
/* rb_ensure body for Mutex#sleep with a timeout: `time` smuggles a
 * struct timeval pointer through the VALUE parameter. */
static VALUE
rb_mutex_wait_for(VALUE time)
{
    struct timeval *t = (struct timeval *)time;
    sleep_timeval(GET_THREAD(), *t, 0); /* permit spurious check */
    return Qnil;
}
04481 
/*
 * Implements Mutex#sleep: releases the mutex, sleeps `timeout` seconds
 * (forever when timeout is nil), then re-acquires the mutex via rb_ensure
 * even if the sleep is interrupted.  Returns the elapsed whole seconds.
 */
VALUE
rb_mutex_sleep(VALUE self, VALUE timeout)
{
    time_t beg, end;
    struct timeval t;

    /* convert/validate the timeout *before* unlocking, so a bad argument
     * raises while the mutex is still held */
    if (!NIL_P(timeout)) {
        t = rb_time_interval(timeout);
    }
    rb_mutex_unlock(self);
    beg = time(0);
    if (NIL_P(timeout)) {
        rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
    }
    else {
        rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
    }
    end = time(0) - beg;
    return INT2FIX(end);
}
04502 
04503 /*
04504  * call-seq:
04505  *    mutex.sleep(timeout = nil)    -> number
04506  *
04507  * Releases the lock and sleeps +timeout+ seconds if it is given and
04508  * non-nil or forever.  Raises +ThreadError+ if +mutex+ wasn't locked by
04509  * the current thread.
04510  *
04511  * Note that this method can wakeup without explicit Thread#wakeup call.
04512  * For example, receiving signal and so on.
04513  */
04514 static VALUE
04515 mutex_sleep(int argc, VALUE *argv, VALUE self)
04516 {
04517     VALUE timeout;
04518 
04519     rb_scan_args(argc, argv, "01", &timeout);
04520     return rb_mutex_sleep(self, timeout);
04521 }
04522 
04523 /*
04524  * call-seq:
04525  *    mutex.synchronize { ... }    -> result of the block
04526  *
04527  * Obtains a lock, runs the block, and releases the lock when the block
04528  * completes.  See the example under +Mutex+.
04529  */
04530 
/* C API: lock `mutex`, call func(arg), and unlock even if func raises. */
VALUE
rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}
04537 
04538 /*
04539  * call-seq:
04540  *    mutex.synchronize { ... }    -> result of the block
04541  *
04542  * Obtains a lock, runs the block, and releases the lock when the block
04543  * completes.  See the example under +Mutex+.
04544  */
04545 static VALUE
04546 rb_mutex_synchronize_m(VALUE self, VALUE args)
04547 {
04548     if (!rb_block_given_p()) {
04549         rb_raise(rb_eThreadError, "must be called with a block");
04550     }
04551 
04552     return rb_mutex_synchronize(self, rb_yield, Qundef);
04553 }
04554 
04555 void rb_mutex_allow_trap(VALUE self, int val)
04556 {
04557     rb_mutex_t *m;
04558     GetMutexPtr(self, m);
04559 
04560     m->allow_trap = val;
04561 }
04562 
04563 /*
04564  * Document-class: ThreadShield
04565  */
/* GC mark callback: DATA_PTR of a ThreadShield holds the Mutex VALUE
 * directly (see thread_shield_alloc), so mark it. */
static void
thread_shield_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}
04571 
/* TypedData bookkeeping for ThreadShield (mark only; no free/memsize). */
static const rb_data_type_t thread_shield_data_type = {
    "thread_shield",
    {thread_shield_mark, 0, 0,},
};
04576 
/* Allocates a ThreadShield; the wrapped pointer is itself the VALUE of a
 * freshly allocated (classless) Mutex object. */
static VALUE
thread_shield_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
}
04582 
#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
/* FL_USER0..FL_USER19 of the shield object's flags word encode the number
 * of threads currently blocked in rb_thread_shield_wait(). */
#define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
/* Extract the waiting count from the flags word. */
#define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
04587 
04588 static inline void
04589 rb_thread_shield_waiting_inc(VALUE b)
04590 {
04591     unsigned int w = rb_thread_shield_waiting(b);
04592     w++;
04593     if (w > (unsigned int)(THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT))
04594         rb_raise(rb_eRuntimeError, "waiting count overflow");
04595     RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
04596     RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
04597 }
04598 
04599 static inline void
04600 rb_thread_shield_waiting_dec(VALUE b)
04601 {
04602     unsigned int w = rb_thread_shield_waiting(b);
04603     if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
04604     w--;
04605     RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
04606     RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
04607 }
04608 
/* Creates a new ThreadShield whose internal mutex is immediately locked
 * by the creating thread. */
VALUE
rb_thread_shield_new(void)
{
    VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
    rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
    return thread_shield;
}
04616 
04617 /*
04618  * Wait a thread shield.
04619  *
04620  * Returns
04621  *  true:  acquired the thread shield
04622  *  false: the thread shield was destroyed and no other threads waiting
04623  *  nil:   the thread shield was destroyed but still in use
04624  */
VALUE
rb_thread_shield_wait(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);
    rb_mutex_t *m;

    if (!mutex) return Qfalse;              /* shield already destroyed */
    GetMutexPtr(mutex, m);
    if (m->th == GET_THREAD()) return Qnil; /* shield held by this very thread */
    rb_thread_shield_waiting_inc(self);
    rb_mutex_lock(mutex);
    rb_thread_shield_waiting_dec(self);
    if (DATA_PTR(self)) return Qtrue;       /* acquired a live shield */
    /* shield was destroyed while we waited: release the lock so the next
     * waiter wakes, and report whether any remain */
    rb_mutex_unlock(mutex);
    return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
}
04641 
04642 /*
04643  * Release a thread shield, and return true if it has waiting threads.
04644  */
04645 VALUE
04646 rb_thread_shield_release(VALUE self)
04647 {
04648     VALUE mutex = GetThreadShieldPtr(self);
04649     rb_mutex_unlock(mutex);
04650     return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
04651 }
04652 
04653 /*
04654  * Release and destroy a thread shield, and return true if it has waiting threads.
04655  */
VALUE
rb_thread_shield_destroy(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);
    /* clear DATA_PTR *before* unlocking so woken waiters in
     * rb_thread_shield_wait() observe the destroyed state */
    DATA_PTR(self) = 0;
    rb_mutex_unlock(mutex);
    return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
}
04664 
04665 /* variables for recursive traversals */
04666 static ID recursive_key;
04667 
04668 /*
04669  * Returns the current "recursive list" used to detect recursion.
04670  * This list is a hash table, unique for the current thread and for
04671  * the current __callee__.
04672  */
04673 
static VALUE
recursive_list_access(void)
{
    /* NOTE(review): `hash` is volatile, presumably to keep it visible to
     * the conservative GC across the allocating calls below -- confirm */
    volatile VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
    VALUE sym = ID2SYM(rb_frame_this_func());
    VALUE list;
    if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
        /* first use on this thread: create the per-thread table */
        hash = rb_hash_new();
        OBJ_UNTRUST(hash);
        rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
        list = Qnil;
    }
    else {
        list = rb_hash_aref(hash, sym);
    }
    if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
        /* first use of this method on this thread: create its list */
        list = rb_hash_new();
        OBJ_UNTRUST(list);
        rb_hash_aset(hash, sym, list);
    }
    return list;
}
04696 
04697 /*
04698  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
04699  * in the recursion list.
04700  * Assumes the recursion list is valid.
04701  */
04702 
static VALUE
recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  /* on platforms where object ids may be Bignums, compare by value */
  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
    rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

    VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
    if (pair_list == Qundef)
        return Qfalse;
    if (paired_obj_id) {
        if (!RB_TYPE_P(pair_list, T_HASH)) {
            /* a single paired id is stored inline (see recursive_push) */
            if (!OBJ_ID_EQL(paired_obj_id, pair_list))
                return Qfalse;
        }
        else {
            /* multiple paired ids are stored as a hash set */
            if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
                return Qfalse;
        }
    }
    return Qtrue;
}
04728 
04729 /*
04730  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
04731  * For a single obj_id, it sets list[obj_id] to Qtrue.
04732  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
04733  * otherwise list[obj_id] becomes a hash like:
04734  *   {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
04735  * Assumes the recursion list is valid.
04736  */
04737 
04738 static void
04739 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
04740 {
04741     VALUE pair_list;
04742 
04743     if (!paired_obj) {
04744         rb_hash_aset(list, obj, Qtrue);
04745     }
04746     else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
04747         rb_hash_aset(list, obj, paired_obj);
04748     }
04749     else {
04750         if (!RB_TYPE_P(pair_list, T_HASH)){
04751             VALUE other_paired_obj = pair_list;
04752             pair_list = rb_hash_new();
04753             OBJ_UNTRUST(pair_list);
04754             rb_hash_aset(pair_list, other_paired_obj, Qtrue);
04755             rb_hash_aset(list, obj, pair_list);
04756         }
04757         rb_hash_aset(pair_list, paired_obj, Qtrue);
04758     }
04759 }
04760 
04761 /*
04762  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
04763  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
04764  * removed from the hash and no attempt is made to simplify
04765  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
04766  * Assumes the recursion list is valid.
04767  */
04768 
static void
recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
{
    if (paired_obj) {
        VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
        if (pair_list == Qundef) {
            /* corrupted list: the entry pushed by recursive_push is gone */
            VALUE symname = rb_inspect(ID2SYM(rb_frame_this_func()));
            VALUE thrname = rb_inspect(rb_thread_current());
            rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list for %s in %s",
                     StringValuePtr(symname), StringValuePtr(thrname));
        }
        if (RB_TYPE_P(pair_list, T_HASH)) {
            rb_hash_delete(pair_list, paired_obj);
            if (!RHASH_EMPTY_P(pair_list)) {
                return; /* keep the hash until it is empty */
            }
        }
    }
    rb_hash_delete(list, obj);
}
04789 
/* Arguments bundled for exec_recursive_i / rb_catch_obj. */
struct exec_recursive_params {
    VALUE (*func) (VALUE, VALUE, int);  /* user callback (obj, arg, recursive) */
    VALUE list;    /* recursion list, see recursive_list_access() */
    VALUE obj;     /* object passed through to func */
    VALUE objid;   /* rb_obj_id(obj), key in the list */
    VALUE pairid;  /* object id of the paired object, or 0 */
    VALUE arg;     /* opaque argument forwarded to func */
};
04798 
/*
 * Tagged body for exec_recursive: registers <objid, pairid> in the
 * recursion list, invokes func, and always pops the entry again -- even
 * when func raises or throws (the saved state is re-raised via JUMP_TAG).
 */
static VALUE
exec_recursive_i(VALUE tag, struct exec_recursive_params *p)
{
    VALUE result = Qundef;
    int state;

    recursive_push(p->list, p->objid, p->pairid);
    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
        result = (*p->func)(p->obj, p->arg, FALSE);
    }
    POP_TAG();
    recursive_pop(p->list, p->objid, p->pairid);
    if (state)
        JUMP_TAG(state);
    return result;
}
04816 
04817 /*
04818  * Calls func(obj, arg, recursive), where recursive is non-zero if the
04819  * current method is called recursively on obj, or on the pair <obj, pairid>
04820  * If outer is 0, then the innermost func will be called with recursive set
04821  * to Qtrue, otherwise the outermost func will be called. In the latter case,
04822  * all inner func are short-circuited by throw.
04823  * Implementation details: the value thrown is the recursive list which is
 * proper to the current method and unlikely to be caught anywhere else.
04825  * list[recursive_key] is used as a flag for the outermost call.
04826  */
04827 
static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    VALUE result = Qundef;
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access();
    p.objid = rb_obj_id(obj);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    /* list[recursive_key] flags that an outermost call is in progress */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.objid, pairid)) {
        if (outer && !outermost) {
            /* unwind to the outermost call; the list itself is the tag */
            rb_throw_obj(p.list, p.list);
        }
        return (*func)(obj, arg, TRUE);
    }
    else {
        p.func = func;

        if (outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            result = rb_catch_obj(p.list, exec_recursive_i, (VALUE)&p);
            recursive_pop(p.list, ID2SYM(recursive_key), 0);
            if (result == p.list) {
                /* an inner call threw: rerun func with recursive=TRUE */
                result = (*func)(obj, arg, TRUE);
            }
        }
        else {
            result = exec_recursive_i(0, &p);
        }
    }
    /* NOTE(review): presumably forces `p` to stay observable across the
     * non-local jumps above so it is not optimized away -- confirm */
    *(volatile struct exec_recursive_params *)&p;
    return result;
}
04865 
04866 /*
04867  * Calls func(obj, arg, recursive), where recursive is non-zero if the
04868  * current method is called recursively on obj
04869  */
04870 
VALUE
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    /* unpaired, innermost-call semantics (pairid = 0, outer = 0) */
    return exec_recursive(func, obj, 0, arg, 0);
}
04876 
04877 /*
04878  * Calls func(obj, arg, recursive), where recursive is non-zero if the
04879  * current method is called recursively on the ordered pair <obj, paired_obj>
04880  */
04881 
VALUE
rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    /* paired, innermost-call semantics (outer = 0) */
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
}
04887 
04888 /*
04889  * If recursion is detected on the current method and obj, the outermost
04890  * func will be called with (obj, arg, Qtrue). All inner func will be
04891  * short-circuited using throw.
04892  */
04893 
VALUE
rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    /* unpaired, outermost-call semantics (outer = 1) */
    return exec_recursive(func, obj, 0, arg, 1);
}
04899 
04900 /*
04901  * If recursion is detected on the current method, obj and paired_obj,
04902  * the outermost func will be called with (obj, arg, Qtrue). All inner
04903  * func will be short-circuited using throw.
04904  */
04905 
VALUE
rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    /* paired, outermost-call semantics (outer = 1) */
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
}
04911 
04912 /*
04913  *  call-seq:
04914  *     thr.backtrace     -> array
04915  *
04916  *  Returns the current backtrace of the target thread.
04917  *
04918  */
04919 
static VALUE
rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
{
    /* delegates entirely to the VM backtrace implementation */
    return vm_thread_backtrace(argc, argv, thval);
}
04925 
04926 /* call-seq:
04927  *  thr.backtrace_locations(*args)      -> array or nil
04928  *
04929  * Returns the execution stack for the target thread---an array containing
04930  * backtrace location objects.
04931  *
04932  * See Thread::Backtrace::Location for more information.
04933  *
04934  * This method behaves similarly to Kernel#caller_locations except it applies
04935  * to a specific thread.
04936  */
static VALUE
rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
{
    /* delegates entirely to the VM backtrace implementation */
    return vm_thread_backtrace_locations(argc, argv, thval);
}
04942 
04943 /*
04944  *  Document-class: ThreadError
04945  *
04946  *  Raised when an invalid operation is attempted on a thread.
04947  *
04948  *  For example, when no other thread has been started:
04949  *
04950  *     Thread.stop
04951  *
04952  *  <em>raises the exception:</em>
04953  *
04954  *     ThreadError: stopping only thread
04955  */
04956 
04957 /*
04958  *  +Thread+ encapsulates the behavior of a thread of
04959  *  execution, including the main thread of the Ruby script.
04960  *
04961  *  In the descriptions of the methods in this class, the parameter _sym_
04962  *  refers to a symbol, which is either a quoted string or a
04963  *  +Symbol+ (such as <code>:name</code>).
04964  */
04965 
04966 void
04967 Init_Thread(void)
04968 {
04969 #undef rb_intern
04970 #define rb_intern(str) rb_intern_const(str)
04971 
04972     VALUE cThGroup;
04973     rb_thread_t *th = GET_THREAD();
04974 
04975     sym_never = ID2SYM(rb_intern("never"));
04976     sym_immediate = ID2SYM(rb_intern("immediate"));
04977     sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
04978 
04979     rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
04980     rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
04981     rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
04982     rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
04983     rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
04984     rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
04985     rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
04986     rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
04987     rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
04988     rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
04989     rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
04990     rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
04991 #if THREAD_DEBUG < 0
04992     rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
04993     rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
04994 #endif
04995     rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
04996     rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
04997     rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
04998 
04999     rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
05000     rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
05001     rb_define_method(rb_cThread, "join", thread_join_m, -1);
05002     rb_define_method(rb_cThread, "value", thread_value, 0);
05003     rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
05004     rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
05005     rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
05006     rb_define_method(rb_cThread, "run", rb_thread_run, 0);
05007     rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
05008     rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
05009     rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
05010     rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
05011     rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
05012     rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
05013     rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
05014     rb_define_method(rb_cThread, "status", rb_thread_status, 0);
05015     rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
05016     rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
05017     rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
05018     rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
05019     rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
05020     rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
05021     rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
05022     rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
05023     rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
05024     rb_define_method(rb_cThread, "group", rb_thread_group, 0);
05025     rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
05026     rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
05027 
05028     rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
05029 
05030     closed_stream_error = rb_exc_new2(rb_eIOError, "stream closed");
05031     OBJ_TAINT(closed_stream_error);
05032     OBJ_FREEZE(closed_stream_error);
05033 
05034     cThGroup = rb_define_class("ThreadGroup", rb_cObject);
05035     rb_define_alloc_func(cThGroup, thgroup_s_alloc);
05036     rb_define_method(cThGroup, "list", thgroup_list, 0);
05037     rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
05038     rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
05039     rb_define_method(cThGroup, "add", thgroup_add, 1);
05040 
05041     {
05042         th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
05043         rb_define_const(cThGroup, "Default", th->thgroup);
05044     }
05045 
05046     rb_cMutex = rb_define_class("Mutex", rb_cObject);
05047     rb_define_alloc_func(rb_cMutex, mutex_alloc);
05048     rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
05049     rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
05050     rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
05051     rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
05052     rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
05053     rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
05054     rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
05055     rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);
05056 
05057     recursive_key = rb_intern("__recursive_key__");
05058     rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
05059 
05060     /* init thread core */
05061     {
05062         /* main thread setting */
05063         {
05064             /* acquire global vm lock */
05065             gvl_init(th->vm);
05066             gvl_acquire(th->vm, th);
05067             native_mutex_initialize(&th->vm->thread_destruct_lock);
05068             native_mutex_initialize(&th->interrupt_lock);
05069 
05070             th->pending_interrupt_queue = rb_ary_tmp_new(0);
05071             th->pending_interrupt_queue_checked = 0;
05072             th->pending_interrupt_mask_stack = rb_ary_tmp_new(0);
05073 
05074             th->interrupt_mask = 0;
05075         }
05076     }
05077 
05078     rb_thread_create_timer_thread();
05079 
05080     /* suppress warnings on cygwin, mingw and mswin.*/
05081     (void)native_mutex_trylock;
05082 }
05083 
/*
 * Returns nonzero when the calling native thread is registered with the
 * Ruby VM (i.e. ruby_thread_from_native() yields an rb_thread_t), and
 * zero for a foreign native thread unknown to Ruby.
 */
int
ruby_native_thread_p(void)
{
    return ruby_thread_from_native() != 0;
}
05091 
05092 static int
05093 check_deadlock_i(st_data_t key, st_data_t val, int *found)
05094 {
05095     VALUE thval = key;
05096     rb_thread_t *th;
05097     GetThreadPtr(thval, th);
05098 
05099     if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th)) {
05100         *found = 1;
05101     }
05102     else if (th->locking_mutex) {
05103         rb_mutex_t *mutex;
05104         GetMutexPtr(th->locking_mutex, mutex);
05105 
05106         native_mutex_lock(&mutex->lock);
05107         if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
05108             *found = 1;
05109         }
05110         native_mutex_unlock(&mutex->lock);
05111     }
05112 
05113     return (*found) ? ST_STOP : ST_CONTINUE;
05114 }
05115 
#ifdef DEBUG_DEADLOCK_CHECK
/*
 * st_foreach callback: dump one living thread's scheduling state to stdout
 * and, when the thread is blocked on a mutex, that mutex's owner and
 * waiter count as well.  Diagnostic aid for the deadlock checker; always
 * continues iteration.  `val` and `found` are unused.
 */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    rb_thread_t *th;
    VALUE thval = key;

    GetThreadPtr(thval, th);
    printf("th:%p %d %d", th, th->status, th->interrupt_flag);

    if (th->locking_mutex) {
        rb_mutex_t *mutex;
        GetMutexPtr(th->locking_mutex, mutex);

        /* Read owner/waiter state under the mutex's internal lock. */
        native_mutex_lock(&mutex->lock);
        printf(" %p %d\n", mutex->th, mutex->cond_waiting);
        native_mutex_unlock(&mutex->lock);
    }
    else {
        puts("");
    }

    return ST_CONTINUE;
}
#endif
05139 
/*
 * Detect whole-VM deadlock: called when a thread is about to sleep.  If
 * every living thread is asleep (vm->sleeper equals the living-thread
 * count) and none of them can be woken -- per check_deadlock_i -- raise a
 * fatal "Deadlock?" error on the main thread.
 *
 * Returns early (no deadlock possible yet) while at least one thread is
 * still runnable, and also while a patrol thread other than the current
 * one exists to keep the VM moving.
 */
static void
rb_check_deadlock(rb_vm_t *vm)
{
    int found = 0;

    if (vm_living_thread_num(vm) > vm->sleeper) return;
    /* sleeper exceeding the living-thread count indicates broken bookkeeping */
    if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
    if (patrol_thread && patrol_thread != GET_THREAD()) return;

    /* Scan all living threads; found becomes 1 if any can make progress. */
    st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);

    if (!found) {
        VALUE argv[2];
        argv[0] = rb_eFatal;
        argv[1] = rb_str_new2("No live threads left. Deadlock?");
#ifdef DEBUG_DEADLOCK_CHECK
        printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
        st_foreach(vm->living_threads, debug_i, (st_data_t)0);
#endif
        /* The main thread is about to be woken by the raise, so it no
         * longer counts as a sleeper; decrement before raising. */
        vm->sleeper--;
        rb_threadptr_raise(vm->main_thread, 2, argv);
    }
}
05163 
05164 static void
05165 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
05166 {
05167     VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
05168     if (coverage && RBASIC(coverage)->klass == 0) {
05169         long line = rb_sourceline() - 1;
05170         long count;
05171         if (RARRAY_PTR(coverage)[line] == Qnil) {
05172             return;
05173         }
05174         count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
05175         if (POSFIXABLE(count)) {
05176             RARRAY_PTR(coverage)[line] = LONG2FIX(count);
05177         }
05178     }
05179 }
05180 
05181 VALUE
05182 rb_get_coverages(void)
05183 {
05184     return GET_VM()->coverages;
05185 }
05186 
05187 void
05188 rb_set_coverages(VALUE coverages)
05189 {
05190     GET_VM()->coverages = coverages;
05191     rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
05192 }
05193 
05194 void
05195 rb_reset_coverages(void)
05196 {
05197     GET_VM()->coverages = Qfalse;
05198     rb_remove_event_hook(update_coverage);
05199 }
05200 
05201 VALUE
05202 rb_uninterruptible(VALUE (*b_proc)(ANYARGS), VALUE data)
05203 {
05204     VALUE interrupt_mask = rb_hash_new();
05205     rb_thread_t *cur_th = GET_THREAD();
05206 
05207     rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
05208     rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
05209 
05210     return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
05211 }
05212