Ruby 2.0.0p481 (2014-05-08 revision 45883): vm_insnhelper.c
/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author: usa $

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

/* finish iseq array */
#include "insns.inc"
#include <math.h>
#include "constant.h"
#include "internal.h"
#include "probes.h"
#include "probes_helper.h"

/* control stack frame */

#ifndef INLINE
#define INLINE inline
#endif

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp);

static void
vm_stackoverflow(void)
{
    rb_exc_raise(sysstack_error);
}

static inline rb_control_frame_t *
vm_push_frame(rb_thread_t *th,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE klass,
              VALUE specval,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              const rb_method_entry_t *me)
{
    rb_control_frame_t *const cfp = th->cfp - 1;
    int i;

    /* check stack overflow */
    if ((void *)(sp + local_size) >= (void *)cfp) {
        vm_stackoverflow();
    }
    th->cfp = cfp;

    /* setup vm value stack */

    /* initialize local variables */
    for (i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* set special val */
    *sp = specval;

    /* setup vm control frame stack */

    cfp->pc = (VALUE *)pc;
    cfp->sp = sp + 1;
#if VM_DEBUG_BP_CHECK
    cfp->bp_check = sp + 1;
#endif
    cfp->ep = sp;
    cfp->iseq = (rb_iseq_t *) iseq;
    cfp->flag = type;
    cfp->self = self;
    cfp->block_iseq = 0;
    cfp->proc = 0;
    cfp->me = me;
    if (klass) {
        cfp->klass = klass;
    }
    else {
        rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, prev_cfp)) {
            cfp->klass = Qnil;
        }
        else {
            cfp->klass = prev_cfp->klass;
        }
    }

    if (VMDEBUG == 2) {
        SDR();
    }

    return cfp;
}

static inline void
vm_pop_frame(rb_thread_t *th)
{
    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

    if (VMDEBUG == 2) {
        SDR();
    }
}

/* method dispatch */
static inline VALUE
rb_arg_error_new(int argc, int min, int max)
{
    VALUE err_mess = 0;
    if (min == max) {
        err_mess = rb_sprintf("wrong number of arguments (%d for %d)", argc, min);
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        err_mess = rb_sprintf("wrong number of arguments (%d for %d+)", argc, min);
    }
    else {
        err_mess = rb_sprintf("wrong number of arguments (%d for %d..%d)", argc, min, max);
    }
    return rb_exc_new3(rb_eArgError, err_mess);
}

NORETURN(static void argument_error(const rb_iseq_t *iseq, int miss_argc, int min_argc, int max_argc));
static void
argument_error(const rb_iseq_t *iseq, int miss_argc, int min_argc, int max_argc)
{
    VALUE exc = rb_arg_error_new(miss_argc, min_argc, max_argc);
    VALUE bt = rb_make_backtrace();
    VALUE err_line = 0;

    if (iseq) {
        int line_no = rb_iseq_first_lineno(iseq);

        err_line = rb_sprintf("%s:%d:in `%s'",
                              RSTRING_PTR(iseq->location.path),
                              line_no, RSTRING_PTR(iseq->location.label));
        rb_funcall(bt, rb_intern("unshift"), 1, err_line);
    }

    rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
    rb_exc_raise(exc);
}

NORETURN(static void unknown_keyword_error(const rb_iseq_t *iseq, VALUE hash));
static void
unknown_keyword_error(const rb_iseq_t *iseq, VALUE hash)
{
    VALUE sep = rb_usascii_str_new2(", "), keys;
    const char *msg;
    int i;
    for (i = 0; i < iseq->arg_keywords; i++) {
        rb_hash_delete(hash, ID2SYM(iseq->arg_keyword_table[i]));
    }
    keys = rb_funcall(hash, rb_intern("keys"), 0, 0);
    if (!RB_TYPE_P(keys, T_ARRAY)) rb_raise(rb_eArgError, "unknown keyword");
    msg = RARRAY_LEN(keys) == 1 ? "" : "s";
    keys = rb_funcall(keys, rb_intern("join"), 1, sep);
    rb_raise(rb_eArgError, "unknown keyword%s: %"PRIsVALUE, msg, keys);
}

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arg_error_new(argc, min, max));
}

/* svar */

static inline NODE *
lep_svar_place(rb_thread_t *th, VALUE *lep)
{
    VALUE *svar;

    if (lep && th->root_lep != lep) {
        svar = &lep[-1];
    }
    else {
        svar = &th->root_svar;
    }
    if (NIL_P(*svar)) {
        *svar = (VALUE)NEW_IF(Qnil, Qnil, Qnil);
    }
    return (NODE *)*svar;
}

static VALUE
lep_svar_get(rb_thread_t *th, VALUE *lep, rb_num_t key)
{
    NODE *svar = lep_svar_place(th, lep);

    switch (key) {
      case 0:
        return svar->u1.value;
      case 1:
        return svar->u2.value;
      default: {
        const VALUE ary = svar->u3.value;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - DEFAULT_SPECIAL_VAR_COUNT);
        }
      }
    }
}

static void
lep_svar_set(rb_thread_t *th, VALUE *lep, rb_num_t key, VALUE val)
{
    NODE *svar = lep_svar_place(th, lep);

    switch (key) {
      case 0:
        svar->u1.value = val;
        return;
      case 1:
        svar->u2.value = val;
        return;
      default: {
        VALUE ary = svar->u3.value;

        if (NIL_P(ary)) {
            svar->u3.value = ary = rb_ary_new();
        }
        rb_ary_store(ary, key - DEFAULT_SPECIAL_VAR_COUNT, val);
      }
    }
}

static inline VALUE
vm_getspecial(rb_thread_t *th, VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(th, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(th, lep, 1);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}

static NODE *
vm_get_cref0(const rb_iseq_t *iseq, const VALUE *ep)
{
    while (1) {
        if (VM_EP_LEP_P(ep)) {
            if (!RUBY_VM_NORMAL_ISEQ_P(iseq)) return NULL;
            return iseq->cref_stack;
        }
        else if (ep[-1] != Qnil) {
            return (NODE *)ep[-1];
        }
        ep = VM_EP_PREV_EP(ep);
    }
}
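/* Illustrative aside, not part of vm_insnhelper.c: a minimal sketch of how a
 * C-implemented method with variable arity reaches the arity machinery above.
 * rb_check_arity() is the same check this file uses later (e.g. in
 * vm_call_cfunc_with_frame and for VM_METHOD_TYPE_ATTRSET); on a mismatch it
 * ends up in rb_error_arity(), which builds the
 * "wrong number of arguments (n for m..k)" ArgumentError via rb_arg_error_new().
 * The Demo class and method names below are made up for the example. */

#include "ruby.h"

/* Ruby-visible signature: Demo.clamped(a, b = 0), implemented with argc == -1 */
static VALUE
demo_clamped(int argc, VALUE *argv, VALUE self)
{
    rb_check_arity(argc, 1, 2);          /* raises ArgumentError for 0 or >2 args */
    return argc == 2 ? argv[1] : argv[0];
}

void
Init_demo(void)
{
    VALUE cDemo = rb_define_class("Demo", rb_cObject);
    rb_define_singleton_method(cDemo, "clamped", demo_clamped, -1);
}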
00285 NODE * 00286 rb_vm_get_cref(const rb_iseq_t *iseq, const VALUE *ep) 00287 { 00288 NODE *cref = vm_get_cref0(iseq, ep); 00289 00290 if (cref == 0) { 00291 rb_bug("rb_vm_get_cref: unreachable"); 00292 } 00293 return cref; 00294 } 00295 00296 static NODE * 00297 vm_cref_push(rb_thread_t *th, VALUE klass, int noex, rb_block_t *blockptr) 00298 { 00299 rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp); 00300 NODE *cref = NEW_CREF(klass); 00301 cref->nd_refinements = Qnil; 00302 cref->nd_visi = noex; 00303 00304 if (blockptr) { 00305 cref->nd_next = vm_get_cref0(blockptr->iseq, blockptr->ep); 00306 } 00307 else if (cfp) { 00308 cref->nd_next = vm_get_cref0(cfp->iseq, cfp->ep); 00309 } 00310 /* TODO: why cref->nd_next is 1? */ 00311 if (cref->nd_next && cref->nd_next != (void *) 1 && 00312 !NIL_P(cref->nd_next->nd_refinements)) { 00313 COPY_CREF_OMOD(cref, cref->nd_next); 00314 } 00315 00316 return cref; 00317 } 00318 00319 static inline VALUE 00320 vm_get_cbase(const rb_iseq_t *iseq, const VALUE *ep) 00321 { 00322 NODE *cref = rb_vm_get_cref(iseq, ep); 00323 VALUE klass = Qundef; 00324 00325 while (cref) { 00326 if ((klass = cref->nd_clss) != 0) { 00327 break; 00328 } 00329 cref = cref->nd_next; 00330 } 00331 00332 return klass; 00333 } 00334 00335 static inline VALUE 00336 vm_get_const_base(const rb_iseq_t *iseq, const VALUE *ep) 00337 { 00338 NODE *cref = rb_vm_get_cref(iseq, ep); 00339 VALUE klass = Qundef; 00340 00341 while (cref) { 00342 if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) && 00343 (klass = cref->nd_clss) != 0) { 00344 break; 00345 } 00346 cref = cref->nd_next; 00347 } 00348 00349 return klass; 00350 } 00351 00352 static inline void 00353 vm_check_if_namespace(VALUE klass) 00354 { 00355 VALUE str; 00356 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) { 00357 str = rb_inspect(klass); 00358 rb_raise(rb_eTypeError, "%s is not a class/module", 00359 StringValuePtr(str)); 00360 } 00361 } 00362 00363 static inline VALUE 00364 vm_get_iclass(rb_control_frame_t *cfp, VALUE klass) 00365 { 00366 if (RB_TYPE_P(klass, T_MODULE) && 00367 FL_TEST(klass, RMODULE_IS_OVERLAID) && 00368 RB_TYPE_P(cfp->klass, T_ICLASS) && 00369 RBASIC(cfp->klass)->klass == klass) { 00370 return cfp->klass; 00371 } 00372 else { 00373 return klass; 00374 } 00375 } 00376 00377 static inline VALUE 00378 vm_get_ev_const(rb_thread_t *th, const rb_iseq_t *iseq, 00379 VALUE orig_klass, ID id, int is_defined) 00380 { 00381 VALUE val; 00382 00383 if (orig_klass == Qnil) { 00384 /* in current lexical scope */ 00385 const NODE *root_cref = rb_vm_get_cref(iseq, th->cfp->ep); 00386 const NODE *cref; 00387 VALUE klass = orig_klass; 00388 00389 while (root_cref && root_cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) { 00390 root_cref = root_cref->nd_next; 00391 } 00392 cref = root_cref; 00393 while (cref && cref->nd_next) { 00394 if (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) { 00395 klass = Qnil; 00396 } 00397 else { 00398 klass = cref->nd_clss; 00399 } 00400 cref = cref->nd_next; 00401 00402 if (!NIL_P(klass)) { 00403 VALUE av, am = 0; 00404 st_data_t data; 00405 search_continue: 00406 if (RCLASS_CONST_TBL(klass) && 00407 st_lookup(RCLASS_CONST_TBL(klass), id, &data)) { 00408 val = ((rb_const_entry_t*)data)->value; 00409 if (val == Qundef) { 00410 if (am == klass) break; 00411 am = klass; 00412 if (is_defined) return 1; 00413 if (rb_autoloading_value(klass, id, &av)) return av; 00414 rb_autoload_load(klass, id); 00415 goto search_continue; 00416 } 00417 else { 00418 if (is_defined) { 00419 
return 1; 00420 } 00421 else { 00422 return val; 00423 } 00424 } 00425 } 00426 } 00427 } 00428 00429 /* search self */ 00430 if (root_cref && !NIL_P(root_cref->nd_clss)) { 00431 klass = vm_get_iclass(th->cfp, root_cref->nd_clss); 00432 } 00433 else { 00434 klass = CLASS_OF(th->cfp->self); 00435 } 00436 00437 if (is_defined) { 00438 return rb_const_defined(klass, id); 00439 } 00440 else { 00441 return rb_const_get(klass, id); 00442 } 00443 } 00444 else { 00445 vm_check_if_namespace(orig_klass); 00446 if (is_defined) { 00447 return rb_public_const_defined_from(orig_klass, id); 00448 } 00449 else { 00450 return rb_public_const_get_from(orig_klass, id); 00451 } 00452 } 00453 } 00454 00455 static inline VALUE 00456 vm_get_cvar_base(NODE *cref, rb_control_frame_t *cfp) 00457 { 00458 VALUE klass; 00459 00460 if (!cref) { 00461 rb_bug("vm_get_cvar_base: no cref"); 00462 } 00463 00464 while (cref->nd_next && 00465 (NIL_P(cref->nd_clss) || FL_TEST(cref->nd_clss, FL_SINGLETON) || 00466 (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL))) { 00467 cref = cref->nd_next; 00468 } 00469 if (!cref->nd_next) { 00470 rb_warn("class variable access from toplevel"); 00471 } 00472 00473 klass = vm_get_iclass(cfp, cref->nd_clss); 00474 00475 if (NIL_P(klass)) { 00476 rb_raise(rb_eTypeError, "no class variables available"); 00477 } 00478 return klass; 00479 } 00480 00481 static VALUE 00482 vm_search_const_defined_class(const VALUE cbase, ID id) 00483 { 00484 if (rb_const_defined_at(cbase, id)) return cbase; 00485 if (cbase == rb_cObject) { 00486 VALUE tmp = RCLASS_SUPER(cbase); 00487 while (tmp) { 00488 if (rb_const_defined_at(tmp, id)) return tmp; 00489 tmp = RCLASS_SUPER(tmp); 00490 } 00491 } 00492 return 0; 00493 } 00494 00495 #ifndef USE_IC_FOR_IVAR 00496 #define USE_IC_FOR_IVAR 1 00497 #endif 00498 00499 static inline VALUE 00500 vm_getivar(VALUE obj, ID id, IC ic, rb_call_info_t *ci, int is_attr) 00501 { 00502 #if USE_IC_FOR_IVAR 00503 if (RB_TYPE_P(obj, T_OBJECT)) { 00504 VALUE val = Qundef; 00505 VALUE klass = RBASIC(obj)->klass; 00506 00507 if (LIKELY((!is_attr && (ic->ic_class == klass && ic->ic_vmstat == GET_VM_STATE_VERSION())) || 00508 (is_attr && ci->aux.index > 0))) { 00509 long index = !is_attr ? 
ic->ic_value.index : ci->aux.index - 1; 00510 long len = ROBJECT_NUMIV(obj); 00511 VALUE *ptr = ROBJECT_IVPTR(obj); 00512 00513 if (index < len) { 00514 val = ptr[index]; 00515 } 00516 } 00517 else { 00518 st_data_t index; 00519 long len = ROBJECT_NUMIV(obj); 00520 VALUE *ptr = ROBJECT_IVPTR(obj); 00521 struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj); 00522 00523 if (iv_index_tbl) { 00524 if (st_lookup(iv_index_tbl, id, &index)) { 00525 if ((long)index < len) { 00526 val = ptr[index]; 00527 } 00528 if (!is_attr) { 00529 ic->ic_class = klass; 00530 ic->ic_value.index = index; 00531 ic->ic_vmstat = GET_VM_STATE_VERSION(); 00532 } 00533 else { /* call_info */ 00534 ci->aux.index = index + 1; 00535 } 00536 } 00537 } 00538 } 00539 00540 if (UNLIKELY(val == Qundef)) { 00541 if (!is_attr) rb_warning("instance variable %s not initialized", rb_id2name(id)); 00542 val = Qnil; 00543 } 00544 return val; 00545 } 00546 #endif /* USE_IC_FOR_IVAR */ 00547 if (is_attr) 00548 return rb_attr_get(obj, id); 00549 return rb_ivar_get(obj, id); 00550 } 00551 00552 static inline VALUE 00553 vm_setivar(VALUE obj, ID id, VALUE val, IC ic, rb_call_info_t *ci, int is_attr) 00554 { 00555 #if USE_IC_FOR_IVAR 00556 if (!OBJ_UNTRUSTED(obj) && rb_safe_level() >= 4) { 00557 rb_raise(rb_eSecurityError, "Insecure: can't modify instance variable"); 00558 } 00559 00560 rb_check_frozen(obj); 00561 00562 if (RB_TYPE_P(obj, T_OBJECT)) { 00563 VALUE klass = RBASIC(obj)->klass; 00564 st_data_t index; 00565 00566 if (LIKELY( 00567 (!is_attr && ic->ic_class == klass && ic->ic_vmstat == GET_VM_STATE_VERSION()) || 00568 (is_attr && ci->aux.index > 0))) { 00569 long index = !is_attr ? ic->ic_value.index : ci->aux.index-1; 00570 long len = ROBJECT_NUMIV(obj); 00571 VALUE *ptr = ROBJECT_IVPTR(obj); 00572 00573 if (index < len) { 00574 ptr[index] = val; 00575 return val; /* inline cache hit */ 00576 } 00577 } 00578 else { 00579 struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj); 00580 00581 if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) { 00582 if (!is_attr) { 00583 ic->ic_class = klass; 00584 ic->ic_value.index = index; 00585 ic->ic_vmstat = GET_VM_STATE_VERSION(); 00586 } 00587 else { 00588 ci->aux.index = index + 1; 00589 } 00590 } 00591 /* fall through */ 00592 } 00593 } 00594 #endif /* USE_IC_FOR_IVAR */ 00595 return rb_ivar_set(obj, id, val); 00596 } 00597 00598 static VALUE 00599 vm_getinstancevariable(VALUE obj, ID id, IC ic) 00600 { 00601 return vm_getivar(obj, id, ic, 0, 0); 00602 } 00603 00604 static void 00605 vm_setinstancevariable(VALUE obj, ID id, VALUE val, IC ic) 00606 { 00607 vm_setivar(obj, id, val, ic, 0, 0); 00608 } 00609 00610 static VALUE 00611 vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp, 00612 rb_num_t throw_state, VALUE throwobj) 00613 { 00614 int state = (int)(throw_state & 0xff); 00615 int flag = (int)(throw_state & 0x8000); 00616 rb_num_t level = throw_state >> 16; 00617 00618 if (state != 0) { 00619 VALUE *pt = 0; 00620 if (flag != 0) { 00621 pt = (void *) 1; 00622 } 00623 else { 00624 if (state == TAG_BREAK) { 00625 rb_control_frame_t *cfp = GET_CFP(); 00626 VALUE *ep = GET_EP(); 00627 int is_orphan = 1; 00628 rb_iseq_t *base_iseq = GET_ISEQ(); 00629 00630 search_parent: 00631 if (cfp->iseq->type != ISEQ_TYPE_BLOCK) { 00632 if (cfp->iseq->type == ISEQ_TYPE_CLASS) { 00633 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); 00634 ep = cfp->ep; 00635 goto search_parent; 00636 } 00637 ep = VM_EP_PREV_EP(ep); 00638 base_iseq = base_iseq->parent_iseq; 00639 00640 while ((VALUE 
*) cfp < th->stack + th->stack_size) { 00641 if (cfp->ep == ep) { 00642 goto search_parent; 00643 } 00644 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); 00645 } 00646 rb_bug("VM (throw): can't find break base."); 00647 } 00648 00649 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) { 00650 /* lambda{... break ...} */ 00651 is_orphan = 0; 00652 pt = cfp->ep; 00653 state = TAG_RETURN; 00654 } 00655 else { 00656 ep = VM_EP_PREV_EP(ep); 00657 00658 while ((VALUE *)cfp < th->stack + th->stack_size) { 00659 if (cfp->ep == ep) { 00660 VALUE epc = cfp->pc - cfp->iseq->iseq_encoded; 00661 rb_iseq_t *iseq = cfp->iseq; 00662 int i; 00663 00664 for (i=0; i<iseq->catch_table_size; i++) { 00665 struct iseq_catch_table_entry *entry = &iseq->catch_table[i]; 00666 00667 if (entry->type == CATCH_TYPE_BREAK && 00668 entry->start < epc && entry->end >= epc) { 00669 if (entry->cont == epc) { 00670 goto found; 00671 } 00672 else { 00673 break; 00674 } 00675 } 00676 } 00677 break; 00678 00679 found: 00680 pt = ep; 00681 is_orphan = 0; 00682 break; 00683 } 00684 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); 00685 } 00686 } 00687 00688 if (is_orphan) { 00689 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK); 00690 } 00691 } 00692 else if (state == TAG_RETRY) { 00693 rb_num_t i; 00694 pt = VM_EP_PREV_EP(GET_EP()); 00695 for (i = 0; i < level; i++) { 00696 pt = GC_GUARDED_PTR_REF((VALUE *) * pt); 00697 } 00698 } 00699 else if (state == TAG_RETURN) { 00700 rb_control_frame_t *cfp = GET_CFP(); 00701 VALUE *ep = GET_EP(); 00702 VALUE *target_lep = VM_CF_LEP(cfp); 00703 int in_class_frame = 0; 00704 00705 /* check orphan and get dfp */ 00706 while ((VALUE *) cfp < th->stack + th->stack_size) { 00707 VALUE *lep = VM_CF_LEP(cfp); 00708 00709 if (!target_lep) { 00710 target_lep = lep; 00711 } 00712 00713 if (lep == target_lep && cfp->iseq->type == ISEQ_TYPE_CLASS) { 00714 in_class_frame = 1; 00715 target_lep = 0; 00716 } 00717 00718 if (lep == target_lep) { 00719 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) { 00720 VALUE *tep = ep; 00721 00722 if (in_class_frame) { 00723 /* lambda {class A; ... 
return ...; end} */ 00724 ep = cfp->ep; 00725 goto valid_return; 00726 } 00727 00728 while (target_lep != tep) { 00729 if (cfp->ep == tep) { 00730 /* in lambda */ 00731 ep = cfp->ep; 00732 goto valid_return; 00733 } 00734 tep = VM_EP_PREV_EP(tep); 00735 } 00736 } 00737 } 00738 00739 if (cfp->ep == target_lep && cfp->iseq->type == ISEQ_TYPE_METHOD) { 00740 ep = target_lep; 00741 goto valid_return; 00742 } 00743 00744 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); 00745 } 00746 00747 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN); 00748 00749 valid_return: 00750 pt = ep; 00751 } 00752 else { 00753 rb_bug("isns(throw): unsupport throw type"); 00754 } 00755 } 00756 th->state = state; 00757 return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state); 00758 } 00759 else { 00760 /* continue throw */ 00761 VALUE err = throwobj; 00762 00763 if (FIXNUM_P(err)) { 00764 th->state = FIX2INT(err); 00765 } 00766 else if (SYMBOL_P(err)) { 00767 th->state = TAG_THROW; 00768 } 00769 else if (BUILTIN_TYPE(err) == T_NODE) { 00770 th->state = GET_THROWOBJ_STATE(err); 00771 } 00772 else { 00773 th->state = TAG_RAISE; 00774 /*th->state = FIX2INT(rb_ivar_get(err, idThrowState));*/ 00775 } 00776 return err; 00777 } 00778 } 00779 00780 static inline void 00781 vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag) 00782 { 00783 int is_splat = flag & 0x01; 00784 rb_num_t space_size = num + is_splat; 00785 VALUE *base = cfp->sp, *ptr; 00786 rb_num_t len; 00787 00788 if (!RB_TYPE_P(ary, T_ARRAY)) { 00789 ary = rb_ary_to_ary(ary); 00790 } 00791 00792 cfp->sp += space_size; 00793 00794 ptr = RARRAY_PTR(ary); 00795 len = (rb_num_t)RARRAY_LEN(ary); 00796 00797 if (flag & 0x02) { 00798 /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */ 00799 rb_num_t i = 0, j; 00800 00801 if (len < num) { 00802 for (i=0; i<num-len; i++) { 00803 *base++ = Qnil; 00804 } 00805 } 00806 for (j=0; i<num; i++, j++) { 00807 VALUE v = ptr[len - j - 1]; 00808 *base++ = v; 00809 } 00810 if (is_splat) { 00811 *base = rb_ary_new4(len - j, ptr); 00812 } 00813 } 00814 else { 00815 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */ 00816 rb_num_t i; 00817 VALUE *bptr = &base[space_size - 1]; 00818 00819 for (i=0; i<num; i++) { 00820 if (len <= i) { 00821 for (; i<num; i++) { 00822 *bptr-- = Qnil; 00823 } 00824 break; 00825 } 00826 *bptr-- = ptr[i]; 00827 } 00828 if (is_splat) { 00829 if (num > len) { 00830 *bptr = rb_ary_new(); 00831 } 00832 else { 00833 *bptr = rb_ary_new4(len - num, ptr + num); 00834 } 00835 } 00836 } 00837 RB_GC_GUARD(ary); 00838 } 00839 00840 static VALUE vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci); 00841 00842 static void 00843 vm_search_method(rb_call_info_t *ci, VALUE recv) 00844 { 00845 VALUE klass = CLASS_OF(recv); 00846 00847 #if OPT_INLINE_METHOD_CACHE 00848 if (LIKELY(GET_VM_STATE_VERSION() == ci->vmstat && klass == ci->klass)) { 00849 /* cache hit! 
*/ 00850 } 00851 else { 00852 ci->me = rb_method_entry(klass, ci->mid, &ci->defined_class); 00853 ci->klass = klass; 00854 ci->vmstat = GET_VM_STATE_VERSION(); 00855 ci->call = vm_call_general; 00856 } 00857 #else 00858 ci->me = rb_method_entry(klass, ci->mid, &ci->defined_class); 00859 ci->call = vm_call_general; 00860 ci->klass = klass; 00861 #endif 00862 } 00863 00864 static inline int 00865 check_cfunc(const rb_method_entry_t *me, VALUE (*func)()) 00866 { 00867 if (me && me->def->type == VM_METHOD_TYPE_CFUNC && 00868 me->def->body.cfunc.func == func) { 00869 return 1; 00870 } 00871 else { 00872 return 0; 00873 } 00874 } 00875 00876 static 00877 #ifndef NO_BIG_INLINE 00878 inline 00879 #endif 00880 VALUE 00881 opt_eq_func(VALUE recv, VALUE obj, CALL_INFO ci) 00882 { 00883 if (FIXNUM_2_P(recv, obj) && 00884 BASIC_OP_UNREDEFINED_P(BOP_EQ, FIXNUM_REDEFINED_OP_FLAG)) { 00885 return (recv == obj) ? Qtrue : Qfalse; 00886 } 00887 else if (FLONUM_2_P(recv, obj) && 00888 BASIC_OP_UNREDEFINED_P(BOP_EQ, FLOAT_REDEFINED_OP_FLAG)) { 00889 return (recv == obj) ? Qtrue : Qfalse; 00890 } 00891 else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) { 00892 if (HEAP_CLASS_OF(recv) == rb_cFloat && 00893 HEAP_CLASS_OF(obj) == rb_cFloat && 00894 BASIC_OP_UNREDEFINED_P(BOP_EQ, FLOAT_REDEFINED_OP_FLAG)) { 00895 double a = RFLOAT_VALUE(recv); 00896 double b = RFLOAT_VALUE(obj); 00897 00898 if (isnan(a) || isnan(b)) { 00899 return Qfalse; 00900 } 00901 return (a == b) ? Qtrue : Qfalse; 00902 } 00903 else if (HEAP_CLASS_OF(recv) == rb_cString && 00904 HEAP_CLASS_OF(obj) == rb_cString && 00905 BASIC_OP_UNREDEFINED_P(BOP_EQ, STRING_REDEFINED_OP_FLAG)) { 00906 return rb_str_equal(recv, obj); 00907 } 00908 } 00909 00910 { 00911 vm_search_method(ci, recv); 00912 00913 if (check_cfunc(ci->me, rb_obj_equal)) { 00914 return recv == obj ? Qtrue : Qfalse; 00915 } 00916 } 00917 00918 return Qundef; 00919 } 00920 00921 static VALUE 00922 vm_call0(rb_thread_t*, VALUE, ID, int, const VALUE*, const rb_method_entry_t*, VALUE); 00923 00924 static VALUE 00925 check_match(VALUE pattern, VALUE target, enum vm_check_match_type type) 00926 { 00927 switch (type) { 00928 case VM_CHECKMATCH_TYPE_WHEN: 00929 return pattern; 00930 case VM_CHECKMATCH_TYPE_RESCUE: 00931 if (!rb_obj_is_kind_of(pattern, rb_cModule)) { 00932 rb_raise(rb_eTypeError, "class or module required for rescue clause"); 00933 } 00934 /* fall through */ 00935 case VM_CHECKMATCH_TYPE_CASE: { 00936 VALUE defined_class; 00937 rb_method_entry_t *me = rb_method_entry_with_refinements(CLASS_OF(pattern), idEqq, &defined_class); 00938 if (me) { 00939 return vm_call0(GET_THREAD(), pattern, idEqq, 1, &target, me, defined_class); 00940 } 00941 else { 00942 /* fallback to funcall (e.g. method_missing) */ 00943 return rb_funcall2(pattern, idEqq, 1, &target); 00944 } 00945 } 00946 default: 00947 rb_bug("check_match: unreachable"); 00948 } 00949 } 00950 00951 00952 #if defined(_MSC_VER) && _MSC_VER < 1300 00953 #define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse; 00954 #else 00955 #define CHECK_CMP_NAN(a, b) /* do nothing */ 00956 #endif 00957 00958 static inline VALUE 00959 double_cmp_lt(double a, double b) 00960 { 00961 CHECK_CMP_NAN(a, b); 00962 return a < b ? Qtrue : Qfalse; 00963 } 00964 00965 static inline VALUE 00966 double_cmp_le(double a, double b) 00967 { 00968 CHECK_CMP_NAN(a, b); 00969 return a <= b ? Qtrue : Qfalse; 00970 } 00971 00972 static inline VALUE 00973 double_cmp_gt(double a, double b) 00974 { 00975 CHECK_CMP_NAN(a, b); 00976 return a > b ? 
Qtrue : Qfalse; 00977 } 00978 00979 static inline VALUE 00980 double_cmp_ge(double a, double b) 00981 { 00982 CHECK_CMP_NAN(a, b); 00983 return a >= b ? Qtrue : Qfalse; 00984 } 00985 00986 static VALUE * 00987 vm_base_ptr(rb_control_frame_t *cfp) 00988 { 00989 rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); 00990 VALUE *bp = prev_cfp->sp + cfp->iseq->local_size + 1; 00991 00992 if (cfp->iseq->type == ISEQ_TYPE_METHOD) { 00993 /* adjust `self' */ 00994 bp += 1; 00995 } 00996 00997 #if VM_DEBUG_BP_CHECK 00998 if (bp != cfp->bp_check) { 00999 fprintf(stderr, "bp_check: %ld, bp: %ld\n", 01000 (long)(cfp->bp_check - GET_THREAD()->stack), 01001 (long)(bp - GET_THREAD()->stack)); 01002 rb_bug("vm_base_ptr: unreachable"); 01003 } 01004 #endif 01005 01006 return bp; 01007 } 01008 01009 /* method call processes with call_info */ 01010 01011 static void 01012 vm_caller_setup_args(const rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01013 { 01014 #define SAVE_RESTORE_CI(expr, ci) do { \ 01015 int saved_argc = (ci)->argc; rb_block_t *saved_blockptr = (ci)->blockptr; /* save */ \ 01016 expr; \ 01017 (ci)->argc = saved_argc; (ci)->blockptr = saved_blockptr; /* restore */ \ 01018 } while (0) 01019 01020 if (UNLIKELY(ci->flag & VM_CALL_ARGS_BLOCKARG)) { 01021 rb_proc_t *po; 01022 VALUE proc; 01023 01024 proc = *(--cfp->sp); 01025 01026 if (proc != Qnil) { 01027 if (!rb_obj_is_proc(proc)) { 01028 VALUE b; 01029 01030 SAVE_RESTORE_CI(b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc"), ci); 01031 01032 if (NIL_P(b) || !rb_obj_is_proc(b)) { 01033 rb_raise(rb_eTypeError, 01034 "wrong argument type %s (expected Proc)", 01035 rb_obj_classname(proc)); 01036 } 01037 proc = b; 01038 } 01039 GetProcPtr(proc, po); 01040 ci->blockptr = &po->block; 01041 RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc; 01042 } 01043 } 01044 else if (ci->blockiseq != 0) { /* likely */ 01045 ci->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp); 01046 ci->blockptr->iseq = ci->blockiseq; 01047 ci->blockptr->proc = 0; 01048 } 01049 01050 /* expand top of stack? 
*/ 01051 01052 if (UNLIKELY(ci->flag & VM_CALL_ARGS_SPLAT)) { 01053 VALUE ary = *(cfp->sp - 1); 01054 VALUE *ptr; 01055 int i; 01056 VALUE tmp; 01057 01058 SAVE_RESTORE_CI(tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a"), ci); 01059 01060 if (NIL_P(tmp)) { 01061 /* do nothing */ 01062 } 01063 else { 01064 long len = RARRAY_LEN(tmp); 01065 ptr = RARRAY_PTR(tmp); 01066 cfp->sp -= 1; 01067 01068 CHECK_VM_STACK_OVERFLOW(cfp, len); 01069 01070 for (i = 0; i < len; i++) { 01071 *cfp->sp++ = ptr[i]; 01072 } 01073 ci->argc += i-1; 01074 } 01075 } 01076 } 01077 01078 static int 01079 separate_symbol(st_data_t key, st_data_t value, st_data_t arg) 01080 { 01081 VALUE *kwdhash = (VALUE *)arg; 01082 01083 if (!SYMBOL_P(key)) kwdhash++; 01084 if (!*kwdhash) *kwdhash = rb_hash_new(); 01085 rb_hash_aset(*kwdhash, (VALUE)key, (VALUE)value); 01086 return ST_CONTINUE; 01087 } 01088 01089 static VALUE 01090 extract_keywords(VALUE *orighash) 01091 { 01092 VALUE parthash[2] = {0, 0}; 01093 VALUE hash = *orighash; 01094 01095 if (RHASH_EMPTY_P(hash)) { 01096 *orighash = 0; 01097 return hash; 01098 } 01099 st_foreach(RHASH_TBL(hash), separate_symbol, (st_data_t)&parthash); 01100 *orighash = parthash[1]; 01101 return parthash[0]; 01102 } 01103 01104 static inline int 01105 vm_callee_setup_keyword_arg(const rb_iseq_t *iseq, int argc, int m, VALUE *orig_argv, VALUE *kwd) 01106 { 01107 VALUE keyword_hash, orig_hash; 01108 int i, j; 01109 01110 if (argc > m && 01111 !NIL_P(orig_hash = rb_check_hash_type(orig_argv[argc-1])) && 01112 (keyword_hash = extract_keywords(&orig_hash)) != 0) { 01113 if (!orig_hash) { 01114 argc--; 01115 } 01116 else { 01117 orig_argv[argc-1] = orig_hash; 01118 } 01119 if (iseq->arg_keyword_check) { 01120 for (i = j = 0; i < iseq->arg_keywords; i++) { 01121 if (st_lookup(RHASH_TBL(keyword_hash), ID2SYM(iseq->arg_keyword_table[i]), 0)) j++; 01122 } 01123 if (RHASH_TBL(keyword_hash)->num_entries > (unsigned int) j) { 01124 unknown_keyword_error(iseq, keyword_hash); 01125 } 01126 } 01127 } 01128 else { 01129 keyword_hash = rb_hash_new(); 01130 } 01131 01132 *kwd = keyword_hash; 01133 01134 return argc; 01135 } 01136 01137 static inline int 01138 vm_callee_setup_arg_complex(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq, VALUE *orig_argv) 01139 { 01140 const int m = iseq->argc; 01141 const int opts = iseq->arg_opts - (iseq->arg_opts > 0); 01142 const int min = m + iseq->arg_post_len; 01143 const int max = (iseq->arg_rest == -1) ? 
m + opts + iseq->arg_post_len : UNLIMITED_ARGUMENTS; 01144 const int orig_argc = ci->argc; 01145 int argc = orig_argc; 01146 VALUE *argv = orig_argv; 01147 VALUE keyword_hash = Qnil; 01148 rb_num_t opt_pc = 0; 01149 01150 th->mark_stack_len = argc + iseq->arg_size; 01151 01152 /* keyword argument */ 01153 if (iseq->arg_keyword != -1) { 01154 argc = vm_callee_setup_keyword_arg(iseq, argc, m, orig_argv, &keyword_hash); 01155 } 01156 01157 /* mandatory */ 01158 if ((argc < min) || (argc > max && max != UNLIMITED_ARGUMENTS)) { 01159 argument_error(iseq, argc, min, max); 01160 } 01161 01162 argv += m; 01163 argc -= m; 01164 01165 /* post arguments */ 01166 if (iseq->arg_post_len) { 01167 if (!(orig_argc < iseq->arg_post_start)) { 01168 VALUE *new_argv = ALLOCA_N(VALUE, argc); 01169 MEMCPY(new_argv, argv, VALUE, argc); 01170 argv = new_argv; 01171 } 01172 01173 MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len], 01174 VALUE, iseq->arg_post_len); 01175 } 01176 01177 /* opt arguments */ 01178 if (iseq->arg_opts) { 01179 if (argc > opts) { 01180 argc -= opts; 01181 argv += opts; 01182 opt_pc = iseq->arg_opt_table[opts]; /* no opt */ 01183 } 01184 else { 01185 int i; 01186 for (i = argc; i<opts; i++) { 01187 orig_argv[i + m] = Qnil; 01188 } 01189 opt_pc = iseq->arg_opt_table[argc]; 01190 argc = 0; 01191 } 01192 } 01193 01194 /* rest arguments */ 01195 if (iseq->arg_rest != -1) { 01196 orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv); 01197 argc = 0; 01198 } 01199 01200 /* keyword argument */ 01201 if (iseq->arg_keyword != -1) { 01202 orig_argv[iseq->arg_keyword] = keyword_hash; 01203 } 01204 01205 /* block arguments */ 01206 if (iseq->arg_block != -1) { 01207 VALUE blockval = Qnil; 01208 const rb_block_t *blockptr = ci->blockptr; 01209 01210 if (blockptr) { 01211 /* make Proc object */ 01212 if (blockptr->proc == 0) { 01213 rb_proc_t *proc; 01214 blockval = rb_vm_make_proc(th, blockptr, rb_cProc); 01215 GetProcPtr(blockval, proc); 01216 ci->blockptr = &proc->block; 01217 } 01218 else { 01219 blockval = blockptr->proc; 01220 } 01221 } 01222 01223 orig_argv[iseq->arg_block] = blockval; /* Proc or nil */ 01224 } 01225 01226 th->mark_stack_len = 0; 01227 return (int)opt_pc; 01228 } 01229 01230 static VALUE vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci); 01231 static inline VALUE vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci); 01232 static inline VALUE vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci); 01233 01234 static inline void 01235 vm_callee_setup_arg(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq, 01236 VALUE *argv, int is_lambda) 01237 { 01238 if (LIKELY(iseq->arg_simple & 0x01)) { 01239 /* simple check */ 01240 if (ci->argc != iseq->argc) { 01241 argument_error(iseq, ci->argc, iseq->argc, iseq->argc); 01242 } 01243 ci->aux.opt_pc = 0; 01244 CI_SET_FASTPATH(ci, 01245 (UNLIKELY(ci->flag & VM_CALL_TAILCALL) ? 
01246 vm_call_iseq_setup_tailcall : 01247 vm_call_iseq_setup_normal), 01248 (!is_lambda && 01249 !(ci->flag & VM_CALL_ARGS_SPLAT) && /* argc may differ for each calls */ 01250 !(ci->me->flag & NOEX_PROTECTED))); 01251 } 01252 else { 01253 ci->aux.opt_pc = vm_callee_setup_arg_complex(th, ci, iseq, argv); 01254 } 01255 } 01256 01257 static VALUE 01258 vm_call_iseq_setup(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01259 { 01260 vm_callee_setup_arg(th, ci, ci->me->def->body.iseq, cfp->sp - ci->argc, 0); 01261 return vm_call_iseq_setup_2(th, cfp, ci); 01262 } 01263 01264 static VALUE 01265 vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01266 { 01267 if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) { 01268 return vm_call_iseq_setup_normal(th, cfp, ci); 01269 } 01270 else { 01271 return vm_call_iseq_setup_tailcall(th, cfp, ci); 01272 } 01273 } 01274 01275 static inline VALUE 01276 vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01277 { 01278 int i; 01279 VALUE *argv = cfp->sp - ci->argc; 01280 rb_iseq_t *iseq = ci->me->def->body.iseq; 01281 VALUE *sp = argv + iseq->arg_size; 01282 01283 CHECK_VM_STACK_OVERFLOW(cfp, iseq->stack_max); 01284 01285 /* clear local variables */ 01286 for (i = 0; i < iseq->local_size - iseq->arg_size; i++) { 01287 *sp++ = Qnil; 01288 } 01289 01290 vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD, ci->recv, ci->defined_class, 01291 VM_ENVVAL_BLOCK_PTR(ci->blockptr), 01292 iseq->iseq_encoded + ci->aux.opt_pc, sp, 0, ci->me); 01293 01294 cfp->sp = argv - 1 /* recv */; 01295 return Qundef; 01296 } 01297 01298 static inline VALUE 01299 vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01300 { 01301 int i; 01302 VALUE *argv = cfp->sp - ci->argc; 01303 rb_iseq_t *iseq = ci->me->def->body.iseq; 01304 VALUE *src_argv = argv; 01305 VALUE *sp_orig, *sp; 01306 VALUE finish_flag = VM_FRAME_TYPE_FINISH_P(cfp) ? 
VM_FRAME_FLAG_FINISH : 0; 01307 01308 cfp = th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp); /* pop cf */ 01309 01310 CHECK_VM_STACK_OVERFLOW(cfp, iseq->stack_max); 01311 RUBY_VM_CHECK_INTS(th); 01312 01313 sp_orig = sp = cfp->sp; 01314 01315 /* push self */ 01316 sp[0] = ci->recv; 01317 sp++; 01318 01319 /* copy arguments */ 01320 for (i=0; i < iseq->arg_size; i++) { 01321 *sp++ = src_argv[i]; 01322 } 01323 01324 /* clear local variables */ 01325 for (i = 0; i < iseq->local_size - iseq->arg_size; i++) { 01326 *sp++ = Qnil; 01327 } 01328 01329 vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | finish_flag, 01330 ci->recv, ci->defined_class, VM_ENVVAL_BLOCK_PTR(ci->blockptr), 01331 iseq->iseq_encoded + ci->aux.opt_pc, sp, 0, ci->me); 01332 01333 cfp->sp = sp_orig; 01334 return Qundef; 01335 } 01336 01337 static VALUE 01338 call_cfunc_m2(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01339 { 01340 return (*func)(recv, rb_ary_new4(argc, argv)); 01341 } 01342 01343 static VALUE 01344 call_cfunc_m1(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01345 { 01346 return (*func)(argc, argv, recv); 01347 } 01348 01349 static VALUE 01350 call_cfunc_0(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01351 { 01352 return (*func)(recv); 01353 } 01354 01355 static VALUE 01356 call_cfunc_1(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01357 { 01358 return (*func)(recv, argv[0]); 01359 } 01360 01361 static VALUE 01362 call_cfunc_2(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01363 { 01364 return (*func)(recv, argv[0], argv[1]); 01365 } 01366 01367 static VALUE 01368 call_cfunc_3(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01369 { 01370 return (*func)(recv, argv[0], argv[1], argv[2]); 01371 } 01372 01373 static VALUE 01374 call_cfunc_4(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01375 { 01376 return (*func)(recv, argv[0], argv[1], argv[2], argv[3]); 01377 } 01378 01379 static VALUE 01380 call_cfunc_5(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01381 { 01382 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]); 01383 } 01384 01385 static VALUE 01386 call_cfunc_6(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01387 { 01388 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]); 01389 } 01390 01391 static VALUE 01392 call_cfunc_7(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01393 { 01394 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]); 01395 } 01396 01397 static VALUE 01398 call_cfunc_8(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01399 { 01400 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]); 01401 } 01402 01403 static VALUE 01404 call_cfunc_9(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01405 { 01406 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]); 01407 } 01408 01409 static VALUE 01410 call_cfunc_10(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01411 { 01412 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]); 01413 } 01414 01415 static VALUE 01416 call_cfunc_11(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01417 { 01418 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], 
argv[9], argv[10]); 01419 } 01420 01421 static VALUE 01422 call_cfunc_12(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01423 { 01424 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]); 01425 } 01426 01427 static VALUE 01428 call_cfunc_13(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01429 { 01430 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]); 01431 } 01432 01433 static VALUE 01434 call_cfunc_14(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01435 { 01436 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]); 01437 } 01438 01439 static VALUE 01440 call_cfunc_15(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv) 01441 { 01442 return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]); 01443 } 01444 01445 #ifndef VM_PROFILE 01446 #define VM_PROFILE 0 01447 #endif 01448 01449 #if VM_PROFILE 01450 static int vm_profile_counter[4]; 01451 #define VM_PROFILE_UP(x) (vm_profile_counter[x]++) 01452 #define VM_PROFILE_ATEXIT() atexit(vm_profile_show_result) 01453 static void vm_profile_show_result(void) 01454 { 01455 fprintf(stderr, "VM Profile results: \n"); 01456 fprintf(stderr, "r->c call: %d\n", vm_profile_counter[0]); 01457 fprintf(stderr, "r->c popf: %d\n", vm_profile_counter[1]); 01458 fprintf(stderr, "c->c call: %d\n", vm_profile_counter[2]); 01459 fprintf(stderr, "r->c popf: %d\n", vm_profile_counter[3]); 01460 } 01461 #else 01462 #define VM_PROFILE_UP(x) 01463 #define VM_PROFILE_ATEXIT() 01464 #endif 01465 01466 static VALUE 01467 vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 01468 { 01469 VALUE val; 01470 const rb_method_entry_t *me = ci->me; 01471 const rb_method_cfunc_t *cfunc = &me->def->body.cfunc; 01472 int len = cfunc->argc; 01473 01474 /* don't use `ci' after EXEC_EVENT_HOOK because ci can be override */ 01475 VALUE recv = ci->recv; 01476 VALUE defined_class = ci->defined_class; 01477 rb_block_t *blockptr = ci->blockptr; 01478 int argc = ci->argc; 01479 01480 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->klass, me->called_id); 01481 EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass, Qundef); 01482 01483 vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC, recv, defined_class, 01484 VM_ENVVAL_BLOCK_PTR(blockptr), 0, th->cfp->sp, 1, me); 01485 01486 if (len >= 0) rb_check_arity(argc, len, len); 01487 01488 reg_cfp->sp -= argc + 1; 01489 VM_PROFILE_UP(0); 01490 val = (*cfunc->invoker)(cfunc->func, recv, argc, reg_cfp->sp + 1); 01491 01492 if (reg_cfp != th->cfp + 1) { 01493 rb_bug("vm_call_cfunc - cfp consistency error"); 01494 } 01495 01496 vm_pop_frame(th); 01497 01498 EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass, val); 01499 RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->klass, me->called_id); 01500 01501 return val; 01502 } 01503 01504 #if OPT_CALL_CFUNC_WITHOUT_FRAME 01505 static VALUE 01506 vm_call_cfunc_latter(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 01507 { 01508 VALUE val; 01509 int argc = ci->argc; 01510 VALUE *argv = STACK_ADDR_FROM_TOP(argc); 01511 const rb_method_cfunc_t *cfunc = &ci->me->def->body.cfunc; 01512 01513 th->passed_ci = ci; 01514 
reg_cfp->sp -= argc + 1; 01515 ci->aux.inc_sp = argc + 1; 01516 VM_PROFILE_UP(0); 01517 val = (*cfunc->invoker)(cfunc->func, ci, argv); 01518 01519 /* check */ 01520 if (reg_cfp == th->cfp) { /* no frame push */ 01521 if (UNLIKELY(th->passed_ci != ci)) { 01522 rb_bug("vm_call_cfunc_latter: passed_ci error (ci: %p, passed_ci: %p)", ci, th->passed_ci); 01523 } 01524 th->passed_ci = 0; 01525 } 01526 else { 01527 if (UNLIKELY(reg_cfp != RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp))) { 01528 rb_bug("vm_call_cfunc_latter: cfp consistency error (%p, %p)", reg_cfp, th->cfp+1); 01529 } 01530 vm_pop_frame(th); 01531 VM_PROFILE_UP(1); 01532 } 01533 01534 return val; 01535 } 01536 01537 static VALUE 01538 vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 01539 { 01540 VALUE val; 01541 const rb_method_entry_t *me = ci->me; 01542 int len = me->def->body.cfunc.argc; 01543 VALUE recv = ci->recv; 01544 01545 if (len >= 0) rb_check_arity(ci->argc, len, len); 01546 01547 RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->klass, me->called_id); 01548 EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass, Qnil); 01549 01550 if (!(ci->me->flag & NOEX_PROTECTED) && 01551 !(ci->flag & VM_CALL_ARGS_SPLAT)) { 01552 CI_SET_FASTPATH(ci, vm_call_cfunc_latter, 1); 01553 } 01554 val = vm_call_cfunc_latter(th, reg_cfp, ci); 01555 01556 EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass, val); 01557 RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->klass, me->called_id); 01558 01559 return val; 01560 } 01561 01562 void 01563 vm_call_cfunc_push_frame(rb_thread_t *th) 01564 { 01565 rb_call_info_t *ci = th->passed_ci; 01566 const rb_method_entry_t *me = ci->me; 01567 th->passed_ci = 0; 01568 01569 vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC, ci->recv, ci->defined_class, 01570 VM_ENVVAL_BLOCK_PTR(ci->blockptr), 0, th->cfp->sp + ci->aux.inc_sp, 1, me); 01571 01572 if (ci->call != vm_call_general) { 01573 ci->call = vm_call_cfunc_with_frame; 01574 } 01575 } 01576 #else /* OPT_CALL_CFUNC_WITHOUT_FRAME */ 01577 static VALUE 01578 vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 01579 { 01580 return vm_call_cfunc_with_frame(th, reg_cfp, ci); 01581 } 01582 #endif 01583 01584 static VALUE 01585 vm_call_ivar(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01586 { 01587 VALUE val = vm_getivar(ci->recv, ci->me->def->body.attr.id, 0, ci, 1); 01588 cfp->sp -= 1; 01589 return val; 01590 } 01591 01592 static VALUE 01593 vm_call_attrset(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01594 { 01595 VALUE val = vm_setivar(ci->recv, ci->me->def->body.attr.id, *(cfp->sp - 1), 0, ci, 1); 01596 cfp->sp -= 2; 01597 return val; 01598 } 01599 01600 static inline VALUE 01601 vm_call_bmethod_body(rb_thread_t *th, rb_call_info_t *ci, const VALUE *argv) 01602 { 01603 rb_proc_t *proc; 01604 VALUE val; 01605 01606 RUBY_DTRACE_METHOD_ENTRY_HOOK(th, ci->me->klass, ci->me->called_id); 01607 EXEC_EVENT_HOOK(th, RUBY_EVENT_CALL, ci->recv, ci->me->called_id, ci->me->klass, Qnil); 01608 01609 /* control block frame */ 01610 th->passed_me = ci->me; 01611 GetProcPtr(ci->me->def->body.proc, proc); 01612 val = vm_invoke_proc(th, proc, ci->recv, ci->defined_class, ci->argc, argv, ci->blockptr); 01613 01614 EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, ci->recv, ci->me->called_id, ci->me->klass, val); 01615 RUBY_DTRACE_METHOD_RETURN_HOOK(th, ci->me->klass, ci->me->called_id); 01616 01617 return val; 01618 } 01619 01620 static VALUE 01621 vm_call_bmethod(rb_thread_t *th, 
rb_control_frame_t *cfp, rb_call_info_t *ci) 01622 { 01623 VALUE *argv = ALLOCA_N(VALUE, ci->argc); 01624 MEMCPY(argv, cfp->sp - ci->argc, VALUE, ci->argc); 01625 cfp->sp += - ci->argc - 1; 01626 01627 return vm_call_bmethod_body(th, ci, argv); 01628 } 01629 01630 static 01631 #ifdef _MSC_VER 01632 __forceinline 01633 #else 01634 inline 01635 #endif 01636 VALUE vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci); 01637 01638 static VALUE 01639 vm_call_opt_send(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 01640 { 01641 int i = ci->argc - 1; 01642 VALUE sym; 01643 rb_call_info_t ci_entry; 01644 01645 if (ci->argc == 0) { 01646 rb_raise(rb_eArgError, "no method name given"); 01647 } 01648 01649 ci_entry = *ci; /* copy ci entry */ 01650 ci = &ci_entry; 01651 01652 sym = TOPN(i); 01653 01654 if (SYMBOL_P(sym)) { 01655 ci->mid = SYM2ID(sym); 01656 } 01657 else if (!(ci->mid = rb_check_id(&sym))) { 01658 if (rb_method_basic_definition_p(CLASS_OF(ci->recv), idMethodMissing)) { 01659 VALUE exc = make_no_method_exception(rb_eNoMethodError, NULL, ci->recv, rb_long2int(ci->argc), &TOPN(i)); 01660 rb_exc_raise(exc); 01661 } 01662 ci->mid = rb_to_id(sym); 01663 } 01664 01665 /* shift arguments */ 01666 if (i > 0) { 01667 MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i); 01668 } 01669 ci->me = 01670 rb_method_entry_without_refinements(CLASS_OF(ci->recv), 01671 ci->mid, &ci->defined_class); 01672 ci->argc -= 1; 01673 DEC_SP(1); 01674 01675 ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND; 01676 01677 return vm_call_method(th, reg_cfp, ci); 01678 } 01679 01680 static VALUE 01681 vm_call_opt_call(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01682 { 01683 rb_proc_t *proc; 01684 int argc = ci->argc; 01685 VALUE *argv = ALLOCA_N(VALUE, argc); 01686 GetProcPtr(ci->recv, proc); 01687 MEMCPY(argv, cfp->sp - argc, VALUE, argc); 01688 cfp->sp -= argc + 1; 01689 01690 return rb_vm_invoke_proc(th, proc, argc, argv, ci->blockptr); 01691 } 01692 01693 static VALUE 01694 vm_call_method_missing(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 01695 { 01696 VALUE *argv = STACK_ADDR_FROM_TOP(ci->argc); 01697 rb_call_info_t ci_entry; 01698 01699 ci_entry.flag = VM_CALL_FCALL | VM_CALL_OPT_SEND; 01700 ci_entry.argc = ci->argc+1; 01701 ci_entry.mid = idMethodMissing; 01702 ci_entry.blockptr = ci->blockptr; 01703 ci_entry.recv = ci->recv; 01704 ci_entry.me = rb_method_entry(CLASS_OF(ci_entry.recv), idMethodMissing, &ci_entry.defined_class); 01705 01706 /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */ 01707 CHECK_VM_STACK_OVERFLOW(reg_cfp, 1); 01708 if (ci->argc > 0) { 01709 MEMMOVE(argv+1, argv, VALUE, ci->argc); 01710 } 01711 argv[0] = ID2SYM(ci->mid); 01712 INC_SP(1); 01713 01714 th->method_missing_reason = ci->aux.missing_reason; 01715 return vm_call_method(th, reg_cfp, &ci_entry); 01716 } 01717 01718 static inline VALUE 01719 find_refinement(VALUE refinements, VALUE klass) 01720 { 01721 if (NIL_P(refinements)) { 01722 return Qnil; 01723 } 01724 return rb_hash_lookup(refinements, klass); 01725 } 01726 01727 static int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2); 01728 static VALUE vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci); 01729 01730 static rb_control_frame_t * 01731 current_method_entry(rb_thread_t *th, rb_control_frame_t *cfp) 01732 { 01733 rb_control_frame_t *top_cfp = cfp; 01734 01735 if (cfp->iseq && cfp->iseq->type == ISEQ_TYPE_BLOCK) { 01736 
rb_iseq_t *local_iseq = cfp->iseq->local_iseq; 01737 do { 01738 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp); 01739 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) { 01740 /* TODO: orphan block */ 01741 return top_cfp; 01742 } 01743 } while (cfp->iseq != local_iseq); 01744 } 01745 return cfp; 01746 } 01747 01748 static 01749 #ifdef _MSC_VER 01750 __forceinline 01751 #else 01752 inline 01753 #endif 01754 VALUE 01755 vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci) 01756 { 01757 int enable_fastpath = 1; 01758 rb_call_info_t ci_temp; 01759 01760 start_method_dispatch: 01761 if (ci->me != 0) { 01762 if ((ci->me->flag == 0)) { 01763 VALUE klass; 01764 01765 normal_method_dispatch: 01766 switch (ci->me->def->type) { 01767 case VM_METHOD_TYPE_ISEQ:{ 01768 CI_SET_FASTPATH(ci, vm_call_iseq_setup, enable_fastpath); 01769 return vm_call_iseq_setup(th, cfp, ci); 01770 } 01771 case VM_METHOD_TYPE_NOTIMPLEMENTED: 01772 case VM_METHOD_TYPE_CFUNC: 01773 CI_SET_FASTPATH(ci, vm_call_cfunc, enable_fastpath); 01774 return vm_call_cfunc(th, cfp, ci); 01775 case VM_METHOD_TYPE_ATTRSET:{ 01776 rb_check_arity(ci->argc, 1, 1); 01777 ci->aux.index = 0; 01778 CI_SET_FASTPATH(ci, vm_call_attrset, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT)); 01779 return vm_call_attrset(th, cfp, ci); 01780 } 01781 case VM_METHOD_TYPE_IVAR:{ 01782 rb_check_arity(ci->argc, 0, 0); 01783 ci->aux.index = 0; 01784 CI_SET_FASTPATH(ci, vm_call_ivar, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT)); 01785 return vm_call_ivar(th, cfp, ci); 01786 } 01787 case VM_METHOD_TYPE_MISSING:{ 01788 ci->aux.missing_reason = 0; 01789 CI_SET_FASTPATH(ci, vm_call_method_missing, enable_fastpath); 01790 return vm_call_method_missing(th, cfp, ci); 01791 } 01792 case VM_METHOD_TYPE_BMETHOD:{ 01793 CI_SET_FASTPATH(ci, vm_call_bmethod, enable_fastpath); 01794 return vm_call_bmethod(th, cfp, ci); 01795 } 01796 case VM_METHOD_TYPE_ZSUPER:{ 01797 klass = ci->me->klass; 01798 klass = RCLASS_ORIGIN(klass); 01799 zsuper_method_dispatch: 01800 klass = RCLASS_SUPER(klass); 01801 ci_temp = *ci; 01802 ci = &ci_temp; 01803 01804 ci->me = rb_method_entry(klass, ci->mid, &ci->defined_class); 01805 01806 if (ci->me != 0) { 01807 goto normal_method_dispatch; 01808 } 01809 else { 01810 goto start_method_dispatch; 01811 } 01812 } 01813 case VM_METHOD_TYPE_OPTIMIZED:{ 01814 switch (ci->me->def->body.optimize_type) { 01815 case OPTIMIZED_METHOD_TYPE_SEND: 01816 CI_SET_FASTPATH(ci, vm_call_opt_send, enable_fastpath); 01817 return vm_call_opt_send(th, cfp, ci); 01818 case OPTIMIZED_METHOD_TYPE_CALL: 01819 CI_SET_FASTPATH(ci, vm_call_opt_call, enable_fastpath); 01820 return vm_call_opt_call(th, cfp, ci); 01821 default: 01822 rb_bug("vm_call_method: unsupported optimized method type (%d)", 01823 ci->me->def->body.optimize_type); 01824 } 01825 break; 01826 } 01827 case VM_METHOD_TYPE_UNDEF: 01828 break; 01829 case VM_METHOD_TYPE_REFINED:{ 01830 NODE *cref = rb_vm_get_cref(cfp->iseq, cfp->ep); 01831 VALUE refinements = cref ? 
cref->nd_refinements : Qnil; 01832 VALUE refinement, defined_class; 01833 rb_method_entry_t *me; 01834 01835 refinement = find_refinement(refinements, 01836 ci->defined_class); 01837 if (NIL_P(refinement)) { 01838 goto no_refinement_dispatch; 01839 } 01840 me = rb_method_entry(refinement, ci->mid, &defined_class); 01841 if (me) { 01842 if (ci->call == vm_call_super_method) { 01843 rb_control_frame_t *top_cfp = current_method_entry(th, cfp); 01844 if (top_cfp->me && 01845 rb_method_definition_eq(me->def, top_cfp->me->def)) { 01846 goto no_refinement_dispatch; 01847 } 01848 } 01849 ci->me = me; 01850 ci->defined_class = defined_class; 01851 if (me->def->type != VM_METHOD_TYPE_REFINED) { 01852 goto start_method_dispatch; 01853 } 01854 } 01855 01856 no_refinement_dispatch: 01857 if (ci->me->def->body.orig_me) { 01858 ci->me = ci->me->def->body.orig_me; 01859 if (UNDEFINED_METHOD_ENTRY_P(ci->me)) { 01860 ci->me = 0; 01861 } 01862 goto start_method_dispatch; 01863 } 01864 else { 01865 klass = ci->me->klass; 01866 goto zsuper_method_dispatch; 01867 } 01868 } 01869 } 01870 rb_bug("vm_call_method: unsupported method type (%d)", ci->me->def->type); 01871 } 01872 else { 01873 int noex_safe; 01874 if (!(ci->flag & VM_CALL_FCALL) && (ci->me->flag & NOEX_MASK) & NOEX_PRIVATE) { 01875 int stat = NOEX_PRIVATE; 01876 01877 if (ci->flag & VM_CALL_VCALL) { 01878 stat |= NOEX_VCALL; 01879 } 01880 ci->aux.missing_reason = stat; 01881 CI_SET_FASTPATH(ci, vm_call_method_missing, 1); 01882 return vm_call_method_missing(th, cfp, ci); 01883 } 01884 else if (!(ci->flag & VM_CALL_OPT_SEND) && (ci->me->flag & NOEX_MASK) & NOEX_PROTECTED) { 01885 enable_fastpath = 0; 01886 if (!rb_obj_is_kind_of(cfp->self, ci->defined_class)) { 01887 ci->aux.missing_reason = NOEX_PROTECTED; 01888 return vm_call_method_missing(th, cfp, ci); 01889 } 01890 else { 01891 goto normal_method_dispatch; 01892 } 01893 } 01894 else if ((noex_safe = NOEX_SAFE(ci->me->flag)) > th->safe_level && (noex_safe > 2)) { 01895 rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(ci->mid)); 01896 } 01897 else { 01898 goto normal_method_dispatch; 01899 } 01900 } 01901 } 01902 else { 01903 /* method missing */ 01904 int stat = 0; 01905 if (ci->flag & VM_CALL_VCALL) { 01906 stat |= NOEX_VCALL; 01907 } 01908 if (ci->flag & VM_CALL_SUPER) { 01909 stat |= NOEX_SUPER; 01910 } 01911 if (ci->mid == idMethodMissing) { 01912 rb_control_frame_t *reg_cfp = cfp; 01913 VALUE *argv = STACK_ADDR_FROM_TOP(ci->argc); 01914 rb_raise_method_missing(th, ci->argc, argv, ci->recv, stat); 01915 } 01916 else { 01917 ci->aux.missing_reason = stat; 01918 CI_SET_FASTPATH(ci, vm_call_method_missing, 1); 01919 return vm_call_method_missing(th, cfp, ci); 01920 } 01921 } 01922 01923 rb_bug("vm_call_method: unreachable"); 01924 } 01925 01926 static VALUE 01927 vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 01928 { 01929 return vm_call_method(th, reg_cfp, ci); 01930 } 01931 01932 static VALUE 01933 vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 01934 { 01935 return vm_call_method(th, reg_cfp, ci); 01936 } 01937 01938 /* super */ 01939 01940 static inline VALUE 01941 vm_search_normal_superclass(VALUE klass) 01942 { 01943 if (BUILTIN_TYPE(klass) == T_ICLASS && 01944 FL_TEST(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) { 01945 klass = RBASIC(klass)->klass; 01946 } 01947 klass = RCLASS_ORIGIN(klass); 01948 return RCLASS_SUPER(klass); 01949 } 01950 01951 static void 01952 vm_super_outside(void) 01953 { 
01954 rb_raise(rb_eNoMethodError, "super called outside of method"); 01955 } 01956 01957 static int 01958 vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *iseq, VALUE sigval, rb_call_info_t *ci) 01959 { 01960 while (iseq && !iseq->klass) { 01961 iseq = iseq->parent_iseq; 01962 } 01963 01964 if (iseq == 0) { 01965 return -1; 01966 } 01967 01968 ci->mid = iseq->defined_method_id; 01969 01970 if (iseq != iseq->local_iseq) { 01971 /* defined by Module#define_method() */ 01972 rb_control_frame_t *lcfp = GET_CFP(); 01973 01974 if (!sigval) { 01975 /* zsuper */ 01976 return -2; 01977 } 01978 01979 while (lcfp->iseq != iseq) { 01980 rb_thread_t *th = GET_THREAD(); 01981 VALUE *tep = VM_EP_PREV_EP(lcfp->ep); 01982 while (1) { 01983 lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp); 01984 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, lcfp)) { 01985 return -1; 01986 } 01987 if (lcfp->ep == tep) { 01988 break; 01989 } 01990 } 01991 } 01992 01993 /* temporary measure for [Bug #2420] [Bug #3136] */ 01994 if (!lcfp->me) { 01995 return -1; 01996 } 01997 01998 ci->mid = lcfp->me->def->original_id; 01999 ci->klass = vm_search_normal_superclass(lcfp->klass); 02000 } 02001 else { 02002 ci->klass = vm_search_normal_superclass(reg_cfp->klass); 02003 } 02004 02005 return 0; 02006 } 02007 02008 static void 02009 vm_search_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci) 02010 { 02011 VALUE current_defined_class; 02012 rb_iseq_t *iseq = GET_ISEQ(); 02013 VALUE sigval = TOPN(ci->argc); 02014 02015 current_defined_class = GET_CFP()->klass; 02016 if (NIL_P(current_defined_class)) { 02017 vm_super_outside(); 02018 } 02019 02020 if (!NIL_P(RCLASS_REFINED_CLASS(current_defined_class))) { 02021 current_defined_class = RCLASS_REFINED_CLASS(current_defined_class); 02022 } 02023 02024 if (!FL_TEST(current_defined_class, RMODULE_INCLUDED_INTO_REFINEMENT) && 02025 !rb_obj_is_kind_of(ci->recv, current_defined_class)) { 02026 VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ? 02027 RBASIC(current_defined_class)->klass : current_defined_class; 02028 02029 rb_raise(rb_eTypeError, 02030 "self has wrong type to call super in this context: " 02031 "%s (expected %s)", 02032 rb_obj_classname(ci->recv), rb_class2name(m)); 02033 } 02034 02035 switch (vm_search_superclass(GET_CFP(), iseq, sigval, ci)) { 02036 case -1: 02037 vm_super_outside(); 02038 case -2: 02039 rb_raise(rb_eRuntimeError, 02040 "implicit argument passing of super from method defined" 02041 " by define_method() is not supported." 
                 " Specify all arguments explicitly.");
    }
    if (!ci->klass) {
        /* bound instance method of module */
        ci->aux.missing_reason = NOEX_SUPER;
        CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
        return;
    }

    /* TODO: use inline cache */
    ci->me = rb_method_entry(ci->klass, ci->mid, &ci->defined_class);
    ci->call = vm_call_super_method;

    while (iseq && !iseq->klass) {
        iseq = iseq->parent_iseq;
    }

    if (ci->me && ci->me->def->type == VM_METHOD_TYPE_ISEQ && ci->me->def->body.iseq == iseq) {
        ci->klass = RCLASS_SUPER(ci->defined_class);
        ci->me = rb_method_entry(ci->klass, ci->mid, &ci->defined_class);
    }
}

/* yield */

static inline int
block_proc_is_lambda(const VALUE procval)
{
    rb_proc_t *proc;

    if (procval) {
        GetProcPtr(procval, proc);
        return proc->is_lambda;
    }
    else {
        return 0;
    }
}

static inline VALUE
vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockargptr)
{
    NODE *ifunc = (NODE *) block->iseq;
    VALUE val, arg, blockarg;
    int lambda = block_proc_is_lambda(block->proc);

    if (lambda) {
        arg = rb_ary_new4(argc, argv);
    }
    else if (argc == 0) {
        arg = Qnil;
    }
    else {
        arg = argv[0];
    }

    if (blockargptr) {
        if (blockargptr->proc) {
            blockarg = blockargptr->proc;
        }
        else {
            blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
        }
    }
    else {
        blockarg = Qnil;
    }

    vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC, self,
                  0, VM_ENVVAL_PREV_EP_PTR(block->ep), 0,
                  th->cfp->sp, 1, 0);

    val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);

    th->cfp++;
    return val;
}


/*--
 * @brief set up block arguments for the case where optional, rest and post
 *        parameters are all supplied.
 * @pre iseq is block style (not lambda style)
 */
static inline int
vm_yield_setup_block_args_complex(rb_thread_t *th, const rb_iseq_t *iseq,
                                  int argc, VALUE *argv)
{
    rb_num_t opt_pc = 0;
    int i;
    const int m = iseq->argc;
    const int r = iseq->arg_rest;
    int len = iseq->arg_post_len;
    int start = iseq->arg_post_start;
    int rsize = argc > m ? argc - m : 0; /* # of arguments which have not been consumed yet */
    int psize = rsize > len ? len : rsize; /* # of post arguments */
    int osize = 0;  /* # of opt arguments */
    VALUE ary;

    /* reserves arguments for post parameters */
    rsize -= psize;

    if (iseq->arg_opts) {
        const int opts = iseq->arg_opts - 1;
        if (rsize > opts) {
            osize = opts;
            opt_pc = iseq->arg_opt_table[opts];
        }
        else {
            osize = rsize;
            opt_pc = iseq->arg_opt_table[rsize];
        }
    }
    rsize -= osize;

    if (0) {
        printf(" argc: %d\n", argc);
        printf("  len: %d\n", len);
        printf("start: %d\n", start);
        printf("rsize: %d\n", rsize);
    }

    if (r == -1) {
        /* copy post argument */
        MEMMOVE(&argv[start], &argv[m+osize], VALUE, psize);
    }
    else {
        ary = rb_ary_new4(rsize, &argv[r]);

        /* copy post argument */
        MEMMOVE(&argv[start], &argv[m+rsize+osize], VALUE, psize);
        argv[r] = ary;
    }

    for (i=psize; i<len; i++) {
        argv[start + i] = Qnil;
    }

    return (int)opt_pc;
}

static inline int
vm_yield_setup_block_args(rb_thread_t *th, const rb_iseq_t * iseq,
                          int orig_argc, VALUE *argv,
                          const rb_block_t *blockptr)
{
    int i;
    int argc = orig_argc;
    const int m = iseq->argc;
    VALUE ary, arg0;
    VALUE keyword_hash = Qnil;
    int opt_pc = 0;

    th->mark_stack_len = argc;

    /*
     * yield [1, 2]
     *  => {|a|}    => a = [1, 2]
     *  => {|a, b|} => a, b = [1, 2]
     */
    arg0 = argv[0];
    if (!(iseq->arg_simple & 0x02) &&          /* exclude {|a|} */
        ((m + iseq->arg_post_len) > 0 ||       /* positional arguments exist */
         iseq->arg_opts > 2 ||                 /* multiple optional arguments exist */
         iseq->arg_keyword != -1 ||            /* any keyword arguments */
         0) &&
        argc == 1 && !NIL_P(ary = rb_check_array_type(arg0))) { /* rhs is only an array */
        th->mark_stack_len = argc = RARRAY_LENINT(ary);

        CHECK_VM_STACK_OVERFLOW(th->cfp, argc);

        MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
    }
    else {
        /* Here argv sits at the top of the VM stack, because vm_invoke_block
         * set sp to the first element of argv.
         * Therefore, if rb_check_array_type(arg0) dispatches to_ary (or
         * method_missing) and that call runs vm_push_frame, the new frame's
         * local-variable initialization can overwrite this argv area.
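         * Restoring argv[0] from the saved arg0 below guards against that clobbering.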
         * see also https://bugs.ruby-lang.org/issues/8484
         */
        argv[0] = arg0;
    }

    /* keyword argument */
    if (iseq->arg_keyword != -1) {
        argc = vm_callee_setup_keyword_arg(iseq, argc, m, argv, &keyword_hash);
    }

    for (i=argc; i<m; i++) {
        argv[i] = Qnil;
    }

    if (iseq->arg_rest == -1 && iseq->arg_opts == 0) {
        const int arg_size = iseq->arg_size;
        if (arg_size < argc) {
            /*
             * yield 1, 2
             *  => {|a|} # truncate
             */
            th->mark_stack_len = argc = arg_size;
        }
    }
    else {
        int r = iseq->arg_rest;

        if (iseq->arg_post_len ||
            iseq->arg_opts) { /* TODO: implement simple version for (iseq->arg_post_len==0 && iseq->arg_opts > 0) */
            opt_pc = vm_yield_setup_block_args_complex(th, iseq, argc, argv);
        }
        else {
            if (argc < r) {
                /* yield 1
                 *  => {|a, b, *r|}
                 */
                for (i=argc; i<r; i++) {
                    argv[i] = Qnil;
                }
                argv[r] = rb_ary_new();
            }
            else {
                argv[r] = rb_ary_new4(argc-r, &argv[r]);
            }
        }

        th->mark_stack_len = iseq->arg_size;
    }

    /* keyword argument */
    if (iseq->arg_keyword != -1) {
        argv[iseq->arg_keyword] = keyword_hash;
    }

    /* {|&b|} */
    if (iseq->arg_block != -1) {
        VALUE procval = Qnil;

        if (blockptr) {
            if (blockptr->proc == 0) {
                procval = rb_vm_make_proc(th, blockptr, rb_cProc);
            }
            else {
                procval = blockptr->proc;
            }
        }

        argv[iseq->arg_block] = procval;
    }

    th->mark_stack_len = 0;
    return opt_pc;
}

static inline int
vm_yield_setup_args(rb_thread_t * const th, const rb_iseq_t *iseq,
                    int argc, VALUE *argv, const rb_block_t *blockptr, int lambda)
{
    if (0) { /* for debug */
        printf("     argc: %d\n", argc);
        printf("iseq argc: %d\n", iseq->argc);
        printf("iseq opts: %d\n", iseq->arg_opts);
        printf("iseq rest: %d\n", iseq->arg_rest);
        printf("iseq post: %d\n", iseq->arg_post_len);
        printf("iseq blck: %d\n", iseq->arg_block);
        printf("iseq smpl: %d\n", iseq->arg_simple);
        printf("   lambda: %s\n", lambda ? "true" : "false");
    }

    if (lambda) {
        /* call as method */
        rb_call_info_t ci_entry;
        ci_entry.flag = 0;
        ci_entry.argc = argc;
        ci_entry.blockptr = (rb_block_t *)blockptr;
        vm_callee_setup_arg(th, &ci_entry, iseq, argv, 1);
        return ci_entry.aux.opt_pc;
    }
    else {
        return vm_yield_setup_block_args(th, iseq, argc, argv, blockptr);
    }
}

static VALUE
vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    const rb_block_t *block = VM_CF_BLOCK_PTR(reg_cfp);
    rb_iseq_t *iseq;
    VALUE type = GET_ISEQ()->local_iseq->type;

    if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    iseq = block->iseq;

    if (UNLIKELY(ci->flag & VM_CALL_ARGS_SPLAT)) {
        vm_caller_setup_args(th, GET_CFP(), ci);
    }

    if (BUILTIN_TYPE(iseq) != T_NODE) {
        int opt_pc;
        const int arg_size = iseq->arg_size;
        VALUE * const rsp = GET_SP() - ci->argc;
        SET_SP(rsp);

        CHECK_VM_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
        opt_pc = vm_yield_setup_args(th, iseq, ci->argc, rsp, 0, block_proc_is_lambda(block->proc));

        vm_push_frame(th, iseq, VM_FRAME_MAGIC_BLOCK, block->self,
                      block->klass,
                      VM_ENVVAL_PREV_EP_PTR(block->ep),
                      iseq->iseq_encoded + opt_pc,
                      rsp + arg_size,
                      iseq->local_size - arg_size, 0);

        return Qundef;
    }
    else {
        VALUE val = vm_yield_with_cfunc(th, block, block->self, ci->argc, STACK_ADDR_FROM_TOP(ci->argc), 0);
        POPN(ci->argc); /* TODO: should put before C/yield? */
        return val;
    }
}
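/* The block-argument handling above is what a Ruby-level `yield` goes through.
 * As a rough illustration from the C API side, the following is a minimal,
 * hypothetical extension sketch (not part of vm_insnhelper.c; the names
 * demo_yield_pair and Init_yield_demo are made up): it yields two values, and
 * a non-lambda block {|a, b| ...} binds them positionally while {|x| ...}
 * receives only the first, matching the semantics implemented by
 * vm_yield_setup_block_args. */
#include "ruby.h"

static VALUE
demo_yield_pair(VALUE self)
{
    if (!rb_block_given_p()) {
        /* mirrors the "no block given (yield)" error raised by vm_invoke_block */
        rb_raise(rb_eLocalJumpError, "no block given (yield)");
    }
    /* rb_yield_values hands the values to the VM, which sets up the block's
     * parameters the same way it does for a Ruby-level yield */
    return rb_yield_values(2, INT2FIX(1), INT2FIX(2));
}

void
Init_yield_demo(void)
{
    rb_define_global_function("demo_yield_pair", demo_yield_pair, 0);
}

/* A second hypothetical sketch (again not part of this file; DemoObject and
 * demo_inspect are made up): rb_call_super() from a C-defined method relies on
 * the same superclass search that vm_search_super_method and
 * vm_search_normal_superclass perform for a Ruby-level `super`, resolving the
 * method starting at the superclass of the defining class. */

static VALUE
demo_inspect(VALUE self)
{
    VALUE base = rb_call_super(0, NULL);   /* like `super` with no arguments */
    return rb_str_cat2(base, " [demo]");
}

void
Init_super_demo(void)
{
    VALUE klass = rb_define_class("DemoObject", rb_cObject);
    rb_define_method(klass, "inspect", demo_inspect, 0);
}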