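/*
 * VM instruction helper routines: control-frame push/pop, caller/callee
 * argument setup, method and block dispatch, per-frame special variables,
 * constant and cref lookup, inline caches and a few optimized instructions.
 */
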
#include "insns.inc"
#include <math.h>

#ifndef INLINE
#define INLINE inline
#endif

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp);

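/* Push a new control frame onto the VM stack: clears the frame's locals to
 * Qnil, stores the special value (block/cref pointer), and initializes
 * pc/sp/bp and the lfp/dfp environment pointers.  Raises sysstack_error
 * when the value stack would collide with the control-frame area. */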
static inline rb_control_frame_t *
vm_push_frame(rb_thread_t *th, const rb_iseq_t *iseq,
              VALUE type, VALUE self, VALUE specval,
              const VALUE *pc, VALUE *sp, VALUE *lfp,
              int local_size)
{
    rb_control_frame_t * const cfp = th->cfp - 1;
    int i;

    if ((void *)(sp + local_size) >= (void *)cfp) {
        rb_exc_raise(sysstack_error);
    }
    th->cfp = cfp;

    for (i = 0; i < local_size; i++) {
        *sp = Qnil;
        sp++;
    }

    *sp = GC_GUARDED_PTR(specval);

    if (lfp == 0) {
        lfp = sp;
    }

    cfp->pc = (VALUE *)pc;
    cfp->sp = sp + 1;
    cfp->bp = sp + 1;
    cfp->iseq = (rb_iseq_t *)iseq;
    cfp->flag = type;
    cfp->self = self;
    cfp->lfp = lfp;
    cfp->dfp = sp;
    cfp->proc = 0;
    cfp->me = 0;

#define COLLECT_PROFILE 0
#if COLLECT_PROFILE
    cfp->prof_time_self = clock();
    cfp->prof_time_chld = 0;
#endif

    if (VMDEBUG == 2) {
        SDR();
    }

    return cfp;
}

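/* Pop the current control frame.  The COLLECT_PROFILE block (disabled
 * above) would record per-iseq call counts and self/cumulative times. */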
static inline void
vm_pop_frame(rb_thread_t *th)
{
#if COLLECT_PROFILE
    rb_control_frame_t *cfp = th->cfp;

    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        VALUE current_time = clock();
        cfp->prof_time_self = current_time - cfp->prof_time_self;
        (cfp+1)->prof_time_chld += cfp->prof_time_self;

        cfp->iseq->profile.count++;
        cfp->iseq->profile.time_cumu = cfp->prof_time_self;
        cfp->iseq->profile.time_self = cfp->prof_time_self - cfp->prof_time_chld;
    }
#endif
    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

    if (VMDEBUG == 2) {
        SDR();
    }
}

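/* Raise ArgumentError with a "wrong number of arguments (m for n)" message,
 * prepending the callee's file:line and method name to the backtrace so the
 * error points at the method definition. */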
NORETURN(static void argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc));
static void
argument_error(const rb_iseq_t *iseq, int miss_argc, int correct_argc)
{
    VALUE mesg = rb_sprintf("wrong number of arguments (%d for %d)", miss_argc, correct_argc);
    VALUE exc = rb_exc_new3(rb_eArgError, mesg);
    VALUE bt = rb_make_backtrace();
    VALUE err_line = 0;

    if (iseq) {
        int line_no = 1;

        if (iseq->insn_info_size) {
            line_no = iseq->insn_info_table[0].line_no;
        }

        err_line = rb_sprintf("%s:%d:in `%s'",
                              RSTRING_PTR(iseq->filename),
                              line_no, RSTRING_PTR(iseq->name));
        rb_funcall(bt, rb_intern("unshift"), 1, err_line);
    }

    rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
    rb_exc_raise(exc);
}

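/* Argument check for iseq callees.  The fast path applies when the method
 * takes only mandatory arguments (arg_simple & 0x01); anything with
 * optional/rest/post/block parameters falls back to
 * vm_callee_setup_arg_complex(). */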
#define VM_CALLEE_SETUP_ARG(ret, th, iseq, orig_argc, orig_argv, block) \
    if (LIKELY(iseq->arg_simple & 0x01)) { \
        if (orig_argc != iseq->argc) { \
            argument_error(iseq, orig_argc, iseq->argc); \
        } \
        ret = 0; \
    } \
    else { \
        ret = vm_callee_setup_arg_complex(th, iseq, orig_argc, orig_argv, block); \
    }

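/* Slow-path argument setup for iseq methods: checks arity, shifts the
 * post-mandatory arguments into place, fills skipped optionals with Qnil,
 * collects the rest argument into an Array and materializes the block
 * parameter as a Proc.  Returns the pc offset of the matching
 * optional-argument entry point. */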
static inline int
vm_callee_setup_arg_complex(rb_thread_t *th, const rb_iseq_t *iseq,
                            int orig_argc, VALUE *orig_argv,
                            const rb_block_t **block)
{
    const int m = iseq->argc;
    int argc = orig_argc;
    VALUE *argv = orig_argv;
    rb_num_t opt_pc = 0;

    th->mark_stack_len = argc + iseq->arg_size;

    if (argc < (m + iseq->arg_post_len)) {
        argument_error(iseq, argc, m + iseq->arg_post_len);
    }

    argv += m;
    argc -= m;

    if (iseq->arg_post_len) {
        if (!(orig_argc < iseq->arg_post_start)) {
            VALUE *new_argv = ALLOCA_N(VALUE, argc);
            MEMCPY(new_argv, argv, VALUE, argc);
            argv = new_argv;
        }

        MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
               VALUE, iseq->arg_post_len);
    }

    if (iseq->arg_opts) {
        const int opts = iseq->arg_opts - 1;

        if (iseq->arg_rest == -1 && argc > opts) {
            argument_error(iseq, orig_argc, m + opts + iseq->arg_post_len);
        }

        if (argc > opts) {
            argc -= opts;
            argv += opts;
            opt_pc = iseq->arg_opt_table[opts];
        }
        else {
            int i;
            for (i = argc; i < opts; i++) {
                orig_argv[i + m] = Qnil;
            }
            opt_pc = iseq->arg_opt_table[argc];
            argc = 0;
        }
    }

    if (iseq->arg_rest != -1) {
        orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv);
        argc = 0;
    }

    if (block && iseq->arg_block != -1) {
        VALUE blockval = Qnil;
        const rb_block_t *blockptr = *block;

        if (argc != 0) {
            argument_error(iseq, orig_argc, m + iseq->arg_post_len);
        }

        if (blockptr) {
            if (blockptr->proc == 0) {
                rb_proc_t *proc;
                blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
                GetProcPtr(blockval, proc);
                *block = &proc->block;
            }
            else {
                blockval = blockptr->proc;
            }
        }

        orig_argv[iseq->arg_block] = blockval;
    }

    th->mark_stack_len = 0;
    return (int)opt_pc;
}

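/* Caller-side argument preparation: converts an explicit &block argument
 * (a Proc, or anything responding to #to_proc) into an rb_block_t,
 * installs an iseq-literal block, and expands a trailing splat array onto
 * the stack.  Returns the adjusted argument count. */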
static inline int
caller_setup_args(const rb_thread_t *th, rb_control_frame_t *cfp, VALUE flag,
                  int argc, rb_iseq_t *blockiseq, rb_block_t **block)
{
    rb_block_t *blockptr = 0;

    if (block) {
        if (flag & VM_CALL_ARGS_BLOCKARG_BIT) {
            rb_proc_t *po;
            VALUE proc;

            proc = *(--cfp->sp);

            if (proc != Qnil) {
                if (!rb_obj_is_proc(proc)) {
                    VALUE b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
                    if (NIL_P(b) || !rb_obj_is_proc(b)) {
                        rb_raise(rb_eTypeError,
                                 "wrong argument type %s (expected Proc)",
                                 rb_obj_classname(proc));
                    }
                    proc = b;
                }
                GetProcPtr(proc, po);
                blockptr = &po->block;
                RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
                *block = blockptr;
            }
        }
        else if (blockiseq) {
            blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
            blockptr->iseq = blockiseq;
            blockptr->proc = 0;
            *block = blockptr;
        }
    }

    if (flag & VM_CALL_ARGS_SPLAT_BIT) {
        VALUE ary = *(cfp->sp - 1);
        VALUE *ptr;
        int i;
        VALUE tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a");

        if (!NIL_P(tmp)) {
            long len = RARRAY_LEN(tmp);
            ptr = RARRAY_PTR(tmp);
            cfp->sp -= 1;

            CHECK_STACK_OVERFLOW(cfp, len);

            for (i = 0; i < len; i++) {
                *cfp->sp++ = ptr[i];
            }
            argc += i - 1;
        }
    }

    return argc;
}

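/* Invoke a C-implemented method.  A non-negative len is the fixed arity and
 * the arguments are spread into separate parameters; len == -1 passes
 * (argc, argv, recv) and len == -2 passes (recv, args_ary).  Arity
 * mismatches raise ArgumentError. */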
static inline VALUE
call_cfunc(VALUE (*func)(), VALUE recv,
           int len, int argc, const VALUE *argv)
{
    if (len >= 0 && argc != len) {
        rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)",
                 argc, len);
    }

    switch (len) {
      case -2:
        return (*func) (recv, rb_ary_new4(argc, argv));
      case -1:
        return (*func) (argc, argv, recv);
      case 0:
        return (*func) (recv);
      case 1:
        return (*func) (recv, argv[0]);
      case 2:
        return (*func) (recv, argv[0], argv[1]);
      case 3:
        return (*func) (recv, argv[0], argv[1], argv[2]);
      case 4:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3]);
      case 5:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
      case 6:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5]);
      case 7:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6]);
      case 8:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7]);
      case 9:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8]);
      case 10:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9]);
      case 11:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9],
                        argv[10]);
      case 12:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9],
                        argv[10], argv[11]);
      case 13:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
                        argv[11], argv[12]);
      case 14:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
                        argv[11], argv[12], argv[13]);
      case 15:
        return (*func) (recv, argv[0], argv[1], argv[2], argv[3], argv[4],
                        argv[5], argv[6], argv[7], argv[8], argv[9], argv[10],
                        argv[11], argv[12], argv[13], argv[14]);
      default:
        rb_raise(rb_eArgError, "too many arguments (%d)", len);
        return Qundef;
    }
}

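/* Call a C function method through the VM: fires the c-call/c-return event
 * hooks, pushes a CFUNC control frame around the call and checks that the
 * callee left the frame stack consistent. */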
static inline VALUE
vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp,
              int num, VALUE recv, const rb_block_t *blockptr,
              const rb_method_entry_t *me)
{
    VALUE val = 0;
    const rb_method_definition_t *def = me->def;
    rb_control_frame_t *cfp;

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass);

    cfp = vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
                        recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);
    cfp->me = me;
    reg_cfp->sp -= num + 1;

    val = call_cfunc(def->body.cfunc.func, recv, (int)def->body.cfunc.argc, num, reg_cfp->sp + 1);

    if (reg_cfp != th->cfp + 1) {
        rb_bug("cfp consistency error - send");
    }

    vm_pop_frame(th);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass);

    return val;
}

static inline VALUE
vm_call_bmethod(rb_thread_t *th, VALUE recv, int argc, const VALUE *argv,
                const rb_block_t *blockptr, const rb_method_entry_t *me)
{
    rb_proc_t *proc;
    VALUE val;

    th->passed_me = me;

    GetProcPtr(me->def->body.proc, proc);
    val = rb_vm_invoke_proc(th, proc, recv, argc, argv, blockptr);
    return val;
}

static inline void
vm_method_missing_args(rb_thread_t *th, VALUE *argv,
                       int num, const rb_block_t *blockptr, int opt)
{
    rb_control_frame_t * const reg_cfp = th->cfp;
    MEMCPY(argv, STACK_ADDR_FROM_TOP(num + 1), VALUE, num + 1);
    th->method_missing_reason = opt;
    th->passed_block = blockptr;
    POPN(num + 1);
}

static inline VALUE
vm_method_missing(rb_thread_t *th, ID id, VALUE recv,
                  int num, const rb_block_t *blockptr, int opt)
{
    VALUE *argv = ALLOCA_N(VALUE, num + 1);
    vm_method_missing_args(th, argv, num, blockptr, opt);
    argv[0] = ID2SYM(id);
    return rb_funcall2(recv, idMethodMissing, num + 1, argv);
}

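/* Prepare a call to a Ruby-defined (iseq) method: run argument setup,
 * zero-fill the remaining locals and push a METHOD frame.  For tail calls
 * (VM_CALL_TAILCALL_BIT) the current frame is discarded and the arguments
 * are shifted down so the new frame replaces it instead of growing the
 * stack. */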
static inline void
vm_setup_method(rb_thread_t *th, rb_control_frame_t *cfp,
                VALUE recv, int argc, const rb_block_t *blockptr, VALUE flag,
                const rb_method_entry_t *me)
{
    int opt_pc, i;
    VALUE *sp, *rsp = cfp->sp - argc;
    rb_iseq_t *iseq = me->def->body.iseq;

    VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, rsp, &blockptr);

    CHECK_STACK_OVERFLOW(cfp, iseq->stack_max);

    sp = rsp + iseq->arg_size;

    if (LIKELY(!(flag & VM_CALL_TAILCALL_BIT))) {
        if (0) printf("local_size: %d, arg_size: %d\n",
                      iseq->local_size, iseq->arg_size);

        for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
            *sp++ = Qnil;
        }

        vm_push_frame(th, iseq,
                      VM_FRAME_MAGIC_METHOD, recv, (VALUE)blockptr,
                      iseq->iseq_encoded + opt_pc, sp, 0, 0);

        cfp->sp = rsp - 1;
    }
    else {
        VALUE *p_rsp;
        th->cfp++;
        p_rsp = th->cfp->sp;

        for (i = 0; i < (sp - rsp); i++) {
            p_rsp[i] = rsp[i];
        }

        sp -= rsp - p_rsp;

        for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
            *sp++ = Qnil;
        }

        vm_push_frame(th, iseq,
                      VM_FRAME_MAGIC_METHOD, recv, (VALUE)blockptr,
                      iseq->iseq_encoded + opt_pc, sp, 0, 0);
    }
}

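/* Central method dispatch.  Chooses the call strategy from the method
 * definition type (iseq, cfunc, attr reader/writer, method_missing,
 * bmethod, zsuper, optimized send/call), enforces private/protected
 * visibility and $SAFE checks, and falls back to method_missing when the
 * method is absent or not callable from the current context. */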
static inline VALUE
vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp,
               int num, const rb_block_t *blockptr, VALUE flag,
               ID id, const rb_method_entry_t *me, VALUE recv)
{
    VALUE val;

  start_method_dispatch:

    if (me != 0) {
        if (me->flag == 0) {
          normal_method_dispatch:
            switch (me->def->type) {
              case VM_METHOD_TYPE_ISEQ:{
                vm_setup_method(th, cfp, recv, num, blockptr, flag, me);
                return Qundef;
              }
              case VM_METHOD_TYPE_NOTIMPLEMENTED:
              case VM_METHOD_TYPE_CFUNC:{
                val = vm_call_cfunc(th, cfp, num, recv, blockptr, me);
                break;
              }
              case VM_METHOD_TYPE_ATTRSET:{
                if (num != 1) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 1)", num);
                }
                val = rb_ivar_set(recv, me->def->body.attr.id, *(cfp->sp - 1));
                cfp->sp -= 2;
                break;
              }
              case VM_METHOD_TYPE_IVAR:{
                if (num != 0) {
                    rb_raise(rb_eArgError, "wrong number of arguments (%d for 0)", num);
                }
                val = rb_attr_get(recv, me->def->body.attr.id);
                cfp->sp -= 1;
                break;
              }
              case VM_METHOD_TYPE_MISSING:{
                VALUE *argv = ALLOCA_N(VALUE, num+1);
                argv[0] = ID2SYM(me->def->original_id);
                MEMCPY(argv+1, cfp->sp - num, VALUE, num);
                cfp->sp -= num + 1;
                val = rb_funcall2(recv, rb_intern("method_missing"), num+1, argv);
                break;
              }
              case VM_METHOD_TYPE_BMETHOD:{
                VALUE *argv = ALLOCA_N(VALUE, num);
                MEMCPY(argv, cfp->sp - num, VALUE, num);
                cfp->sp -= num + 1;
                val = vm_call_bmethod(th, recv, num, argv, blockptr, me);
                break;
              }
              case VM_METHOD_TYPE_ZSUPER:{
                VALUE klass = RCLASS_SUPER(me->klass);
                me = rb_method_entry(klass, id);

                if (me != 0) {
                    goto normal_method_dispatch;
                }
                else {
                    goto start_method_dispatch;
                }
              }
              case VM_METHOD_TYPE_OPTIMIZED:{
                switch (me->def->body.optimize_type) {
                  case OPTIMIZED_METHOD_TYPE_SEND: {
                    rb_control_frame_t *reg_cfp = cfp;
                    rb_num_t i = num - 1;
                    VALUE sym;

                    if (num == 0) {
                        rb_raise(rb_eArgError, "no method name given");
                    }

                    sym = TOPN(i);
                    id = SYMBOL_P(sym) ? SYM2ID(sym) : rb_to_id(sym);

                    if (i > 0) {
                        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
                    }
                    me = rb_method_entry(CLASS_OF(recv), id);
                    num -= 1;
                    DEC_SP(1);
                    flag |= VM_CALL_FCALL_BIT | VM_CALL_OPT_SEND_BIT;

                    goto start_method_dispatch;
                  }
                  case OPTIMIZED_METHOD_TYPE_CALL: {
                    rb_proc_t *proc;
                    int argc = num;
                    VALUE *argv = ALLOCA_N(VALUE, num);
                    GetProcPtr(recv, proc);
                    MEMCPY(argv, cfp->sp - num, VALUE, num);
                    cfp->sp -= num + 1;

                    val = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, blockptr);
                    break;
                  }
                  default:
                    rb_bug("eval_invoke_method: unsupported optimized method type (%d)",
                           me->def->body.optimize_type);
                }
                break;
              }
              default:{
                rb_bug("eval_invoke_method: unsupported method type (%d)", me->def->type);
                break;
              }
            }
        }
        else {
            int noex_safe;

            if (!(flag & VM_CALL_FCALL_BIT) &&
                (me->flag & NOEX_MASK) & NOEX_PRIVATE) {
                int stat = NOEX_PRIVATE;

                if (flag & VM_CALL_VCALL_BIT) {
                    stat |= NOEX_VCALL;
                }
                val = vm_method_missing(th, id, recv, num, blockptr, stat);
            }
            else if (!(flag & VM_CALL_OPT_SEND_BIT) && (me->flag & NOEX_MASK) & NOEX_PROTECTED) {
                VALUE defined_class = me->klass;

                if (TYPE(defined_class) == T_ICLASS) {
                    defined_class = RBASIC(defined_class)->klass;
                }

                if (!rb_obj_is_kind_of(cfp->self, defined_class)) {
                    val = vm_method_missing(th, id, recv, num, blockptr, NOEX_PROTECTED);
                }
                else {
                    goto normal_method_dispatch;
                }
            }
            else if ((noex_safe = NOEX_SAFE(me->flag)) > th->safe_level &&
                     (noex_safe > 2)) {
                rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(id));
            }
            else {
                goto normal_method_dispatch;
            }
        }
    }
    else {
        int stat = 0;
        if (flag & VM_CALL_VCALL_BIT) {
            stat |= NOEX_VCALL;
        }
        if (flag & VM_CALL_SUPER_BIT) {
            stat |= NOEX_SUPER;
        }
        if (id == idMethodMissing) {
            VALUE *argv = ALLOCA_N(VALUE, num);
            vm_method_missing_args(th, argv, num - 1, 0, stat);
            rb_raise_method_missing(th, num, argv, recv, stat);
        }
        else {
            val = vm_method_missing(th, id, recv, num, blockptr, stat);
        }
    }

    RUBY_VM_CHECK_INTS();
    return val;
}

static inline int
block_proc_is_lambda(const VALUE procval)
{
    rb_proc_t *proc;

    if (procval) {
        GetProcPtr(procval, proc);
        return proc->is_lambda;
    }
    else {
        return 0;
    }
}

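/* Yield to a block implemented as a C function (an IFUNC node, typically
 * set up through the C iteration APIs).  Lambda blocks receive all values
 * as one array; ordinary blocks receive only the first value. */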
static inline VALUE
vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockargptr)
{
    NODE *ifunc = (NODE *)block->iseq;
    VALUE val, arg, blockarg;
    int lambda = block_proc_is_lambda(block->proc);

    if (lambda) {
        arg = rb_ary_new4(argc, argv);
    }
    else if (argc == 0) {
        arg = Qnil;
    }
    else {
        arg = argv[0];
    }

    if (blockargptr) {
        if (blockargptr->proc) {
            blockarg = blockargptr->proc;
        }
        else {
            blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
        }
    }
    else {
        blockarg = Qnil;
    }

    vm_push_frame(th, 0, VM_FRAME_MAGIC_IFUNC,
                  self, (VALUE)block->dfp,
                  0, th->cfp->sp, block->lfp, 1);

    val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);

    th->cfp++;
    return val;
}

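/* Block argument setup for blocks with optional, rest or post parameters:
 * splits the incoming values into mandatory, optional, rest and post groups
 * and returns the pc offset of the matching optional-argument entry point. */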
static inline int
vm_yield_setup_block_args_complex(rb_thread_t *th, const rb_iseq_t *iseq,
                                  int argc, VALUE *argv)
{
    rb_num_t opt_pc = 0;
    int i;
    const int m = iseq->argc;
    const int r = iseq->arg_rest;
    int len = iseq->arg_post_len;
    int start = iseq->arg_post_start;
    int rsize = argc > m ? argc - m : 0;
    int psize = rsize > len ? len : rsize;
    int osize = 0;
    VALUE ary;

    rsize -= psize;

    if (iseq->arg_opts) {
        const int opts = iseq->arg_opts - 1;
        if (rsize > opts) {
            osize = opts;
            opt_pc = iseq->arg_opt_table[opts];
        }
        else {
            osize = rsize;
            opt_pc = iseq->arg_opt_table[rsize];
        }
    }
    rsize -= osize;

    if (0) {
        printf(" argc: %d\n", argc);
        printf("  len: %d\n", len);
        printf("start: %d\n", start);
        printf("rsize: %d\n", rsize);
    }

    if (r == -1) {
        MEMMOVE(&argv[start], &argv[m+osize], VALUE, psize);
    }
    else {
        ary = rb_ary_new4(rsize, &argv[r]);

        MEMMOVE(&argv[start], &argv[m+rsize+osize], VALUE, psize);
        argv[r] = ary;
    }

    for (i = psize; i < len; i++) {
        argv[start + i] = Qnil;
    }

    return (int)opt_pc;
}

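/* Block argument setup with block (non-lambda) semantics: a single array
 * argument is auto-splatted when the block takes more than one parameter,
 * missing values become nil, surplus values are dropped or collected into
 * the rest argument, and the block parameter (&b) is bound to a Proc. */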
static inline int
vm_yield_setup_block_args(rb_thread_t *th, const rb_iseq_t *iseq,
                          int orig_argc, VALUE *argv,
                          const rb_block_t *blockptr)
{
    int i;
    int argc = orig_argc;
    const int m = iseq->argc;
    VALUE ary, arg0;
    int opt_pc = 0;

    th->mark_stack_len = argc;

    arg0 = argv[0];
    if (!(iseq->arg_simple & 0x02) &&
        (m + iseq->arg_post_len) > 0 &&
        argc == 1 && !NIL_P(ary = rb_check_array_type(arg0))) {
        th->mark_stack_len = argc = RARRAY_LENINT(ary);

        CHECK_STACK_OVERFLOW(th->cfp, argc);

        MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
    }
    else {
        argv[0] = arg0;
    }

    for (i = argc; i < m; i++) {
        argv[i] = Qnil;
    }

    if (iseq->arg_rest == -1 && iseq->arg_opts == 0) {
        const int arg_size = iseq->arg_size;
        if (arg_size < argc) {
            th->mark_stack_len = argc = arg_size;
        }
    }
    else {
        int r = iseq->arg_rest;

        if (iseq->arg_post_len ||
            iseq->arg_opts) {
            opt_pc = vm_yield_setup_block_args_complex(th, iseq, argc, argv);
        }
        else {
            if (argc < r) {
                for (i = argc; i < r; i++) {
                    argv[i] = Qnil;
                }
                argv[r] = rb_ary_new();
            }
            else {
                argv[r] = rb_ary_new4(argc-r, &argv[r]);
            }
        }

        th->mark_stack_len = iseq->arg_size;
    }

    if (iseq->arg_block != -1) {
        VALUE procval = Qnil;

        if (blockptr) {
            if (blockptr->proc == 0) {
                procval = rb_vm_make_proc(th, blockptr, rb_cProc);
            }
            else {
                procval = blockptr->proc;
            }
        }

        argv[iseq->arg_block] = procval;
    }

    th->mark_stack_len = 0;
    return opt_pc;
}

static inline int
vm_yield_setup_args(rb_thread_t * const th, const rb_iseq_t *iseq,
                    int argc, VALUE *argv,
                    const rb_block_t *blockptr, int lambda)
{
    if (0) {
        printf("     argc: %d\n", argc);
        printf("iseq argc: %d\n", iseq->argc);
        printf("iseq opts: %d\n", iseq->arg_opts);
        printf("iseq rest: %d\n", iseq->arg_rest);
        printf("iseq post: %d\n", iseq->arg_post_len);
        printf("iseq blck: %d\n", iseq->arg_block);
        printf("iseq smpl: %d\n", iseq->arg_simple);
        printf("   lambda: %s\n", lambda ? "true" : "false");
    }

    if (lambda) {
        int opt_pc;
        VM_CALLEE_SETUP_ARG(opt_pc, th, iseq, argc, argv, &blockptr);
        return opt_pc;
    }
    else {
        return vm_yield_setup_block_args(th, iseq, argc, argv, blockptr);
    }
}

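/* Implements the `yield` instruction: fetches the caller's block, raises a
 * LocalJumpError when no block is given, sets up the block's arguments and
 * pushes a BLOCK frame, or calls straight into a C-implemented (IFUNC)
 * block. */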
static VALUE
vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t num, rb_num_t flag)
{
    const rb_block_t *block = GET_BLOCK_PTR();
    rb_iseq_t *iseq;
    int argc = (int)num;
    VALUE type = GET_ISEQ()->local_iseq->type;

    if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    iseq = block->iseq;

    argc = caller_setup_args(th, GET_CFP(), flag, argc, 0, 0);

    if (BUILTIN_TYPE(iseq) != T_NODE) {
        int opt_pc;
        const int arg_size = iseq->arg_size;
        VALUE * const rsp = GET_SP() - argc;
        SET_SP(rsp);

        CHECK_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
        opt_pc = vm_yield_setup_args(th, iseq, argc, rsp, 0,
                                     block_proc_is_lambda(block->proc));

        vm_push_frame(th, iseq,
                      VM_FRAME_MAGIC_BLOCK, block->self, (VALUE)block->dfp,
                      iseq->iseq_encoded + opt_pc, rsp + arg_size, block->lfp,
                      iseq->local_size - arg_size);

        return Qundef;
    }
    else {
        VALUE val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc), 0);
        POPN(argc);
        return val;
    }
}

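/* Special-variable storage.  Each Ruby-level frame keeps an svar node
 * (lazily allocated next to its lfp) holding $_ and $~ plus a hash for the
 * other per-frame specials; vm_getspecial additionally resolves the
 * back-reference variables $&, $`, $', $+ and $1..$n from $~. */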
static inline NODE *
lfp_svar_place(rb_thread_t *th, VALUE *lfp)
{
    VALUE *svar;

    if (lfp && th->local_lfp != lfp) {
        svar = &lfp[-1];
    }
    else {
        svar = &th->local_svar;
    }
    if (NIL_P(*svar)) {
        *svar = (VALUE)NEW_IF(Qnil, Qnil, Qnil);
    }
    return (NODE *)*svar;
}

static VALUE
lfp_svar_get(rb_thread_t *th, VALUE *lfp, VALUE key)
{
    NODE *svar = lfp_svar_place(th, lfp);

    switch (key) {
      case 0:
        return svar->u1.value;
      case 1:
        return svar->u2.value;
      default: {
        const VALUE hash = svar->u3.value;

        if (hash == Qnil) {
            return Qnil;
        }
        else {
            return rb_hash_lookup(hash, key);
        }
      }
    }
}

static void
lfp_svar_set(rb_thread_t *th, VALUE *lfp, VALUE key, VALUE val)
{
    NODE *svar = lfp_svar_place(th, lfp);

    switch (key) {
      case 0:
        svar->u1.value = val;
        return;
      case 1:
        svar->u2.value = val;
        return;
      default: {
        VALUE hash = svar->u3.value;

        if (hash == Qnil) {
            svar->u3.value = hash = rb_hash_new();
        }
        rb_hash_aset(hash, key, val);
      }
    }
}

static inline VALUE
vm_getspecial(rb_thread_t *th, VALUE *lfp, VALUE key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        VALUE k = key;
        if (FIXNUM_P(key)) {
            k = FIX2INT(key);
        }
        val = lfp_svar_get(th, lfp, k);
    }
    else {
        VALUE backref = lfp_svar_get(th, lfp, 1);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}

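/* Lexical scope (cref) handling: vm_get_cref walks the dfp chain back to
 * the frame that owns the local environment to find the current cref,
 * vm_cref_push prepends a new scope for class/module/eval bodies, and
 * vm_get_cbase / vm_get_const_base below pick the class used for constant
 * definition and lookup. */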
static NODE *
vm_get_cref(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
{
    NODE *cref = 0;

    while (1) {
        if (lfp == dfp) {
            cref = iseq->cref_stack;
            break;
        }
        else if (dfp[-1] != Qnil) {
            cref = (NODE *)dfp[-1];
            break;
        }
        dfp = GET_PREV_DFP(dfp);
    }

    if (cref == 0) {
        rb_bug("vm_get_cref: unreachable");
    }
    return cref;
}

static NODE *
vm_cref_push(rb_thread_t *th, VALUE klass, int noex, rb_block_t *blockptr)
{
    rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
    NODE *cref = NEW_BLOCK(klass);
    cref->nd_file = 0;
    cref->nd_visi = noex;

    if (blockptr) {
        cref->nd_next = vm_get_cref(blockptr->iseq, blockptr->lfp, blockptr->dfp);
    }
    else if (cfp) {
        cref->nd_next = vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
    }

    return cref;
}

static inline VALUE
vm_get_cbase(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
{
    NODE *cref = vm_get_cref(iseq, lfp, dfp);
    VALUE klass = Qundef;

    while (cref) {
        if ((klass = cref->nd_clss) != 0) {
            break;
        }
        cref = cref->nd_next;
    }

    return klass;
}

static inline VALUE
vm_get_const_base(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
{
    NODE *cref = vm_get_cref(iseq, lfp, dfp);
    VALUE klass = Qundef;

    while (cref) {
        if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) &&
            (klass = cref->nd_clss) != 0) {
            break;
        }
        cref = cref->nd_next;
    }

    return klass;
}

static inline void
vm_check_if_namespace(VALUE klass)
{
    switch (TYPE(klass)) {
      case T_CLASS:
      case T_MODULE:
        break;
      default:
        rb_raise(rb_eTypeError, "%s is not a class/module",
                 RSTRING_PTR(rb_inspect(klass)));
    }
}

static inline VALUE
vm_get_ev_const(rb_thread_t *th, const rb_iseq_t *iseq,
                VALUE orig_klass, ID id, int is_defined)
{
    VALUE val;

    if (orig_klass == Qnil) {
        const NODE *cref = vm_get_cref(iseq, th->cfp->lfp, th->cfp->dfp);
        const NODE *root_cref = NULL;
        VALUE klass = orig_klass;

        while (cref && cref->nd_next) {
            if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL)) {
                klass = cref->nd_clss;
                if (root_cref == NULL)
                    root_cref = cref;
            }
            cref = cref->nd_next;

            if (!NIL_P(klass)) {
                VALUE am = 0;
              search_continue:
                if (RCLASS_IV_TBL(klass) &&
                    st_lookup(RCLASS_IV_TBL(klass), id, &val)) {
                    if (val == Qundef) {
                        if (am == klass) break;
                        am = klass;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            return val;
                        }
                    }
                }
            }
        }

        if (root_cref && !NIL_P(root_cref->nd_clss)) {
            klass = root_cref->nd_clss;
        }
        else {
            klass = CLASS_OF(th->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_const_defined_from(orig_klass, id);
        }
        else {
            return rb_const_get_from(orig_klass, id);
        }
    }
}

static inline VALUE
vm_get_cvar_base(NODE *cref)
{
    VALUE klass;

    while (cref && cref->nd_next &&
           (NIL_P(cref->nd_clss) || FL_TEST(cref->nd_clss, FL_SINGLETON) ||
            (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL))) {
        cref = cref->nd_next;

        if (!cref->nd_next) {
            rb_warn("class variable access from toplevel");
        }
    }

    klass = cref->nd_clss;

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}

#ifndef USE_IC_FOR_IVAR
#define USE_IC_FOR_IVAR 1
#endif

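/* Instance variable access with a per-call-site inline cache: the cache
 * stores the object's class and the slot index in its ivar table, so
 * repeated reads/writes on objects of the same class skip the ivar-index
 * table lookup.  Non-T_OBJECT receivers go through rb_ivar_get /
 * rb_ivar_set. */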
static VALUE
vm_getivar(VALUE obj, ID id, IC ic)
{
#if USE_IC_FOR_IVAR
    if (TYPE(obj) == T_OBJECT) {
        VALUE val = Qundef;
        VALUE klass = RBASIC(obj)->klass;

        if (ic->ic_class == klass) {
            long index = ic->ic_value.index;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);

            if (index < len) {
                val = ptr[index];
            }
        }
        else {
            st_data_t index;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl) {
                if (st_lookup(iv_index_tbl, id, &index)) {
                    if ((long)index < len) {
                        val = ptr[index];
                    }
                    ic->ic_class = klass;
                    ic->ic_value.index = index;
                }
            }
        }
        if (UNLIKELY(val == Qundef)) {
            rb_warning("instance variable %s not initialized", rb_id2name(id));
            val = Qnil;
        }
        return val;
    }
    else {
        return rb_ivar_get(obj, id);
    }
#else
    return rb_ivar_get(obj, id);
#endif
}

static void
vm_setivar(VALUE obj, ID id, VALUE val, IC ic)
{
#if USE_IC_FOR_IVAR
    if (!OBJ_UNTRUSTED(obj) && rb_safe_level() >= 4) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify instance variable");
    }
    if (OBJ_FROZEN(obj)) {
        rb_error_frozen("object");
    }

    if (TYPE(obj) == T_OBJECT) {
        VALUE klass = RBASIC(obj)->klass;
        st_data_t index;

        if (ic->ic_class == klass) {
            long index = ic->ic_value.index;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);

            if (index < len) {
                ptr[index] = val;
                return;
            }
        }
        else {
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
                ic->ic_class = klass;
                ic->ic_value.index = index;
            }
        }
    }
    rb_ivar_set(obj, id, val);
#else
    rb_ivar_set(obj, id, val);
#endif
}

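/* Method lookup with a per-call-site inline cache, keyed on the receiver
 * class and the global VM state version (which changes when method
 * definitions change), so cache hits avoid calling rb_method_entry(). */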
static inline const rb_method_entry_t *
vm_method_search(VALUE id, VALUE klass, IC ic)
{
    rb_method_entry_t *me;
#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(klass == ic->ic_class) &&
        LIKELY(GET_VM_STATE_VERSION() == ic->ic_vmstat)) {
        me = ic->ic_value.method;
    }
    else {
        me = rb_method_entry(klass, id);
        ic->ic_class = klass;
        ic->ic_value.method = me;
        ic->ic_vmstat = GET_VM_STATE_VERSION();
    }
#else
    me = rb_method_entry(klass, id);
#endif
    return me;
}

static inline VALUE
vm_search_normal_superclass(VALUE klass, VALUE recv)
{
    if (BUILTIN_TYPE(klass) == T_CLASS) {
        return RCLASS_SUPER(klass);
    }
    else if (BUILTIN_TYPE(klass) == T_MODULE) {
        VALUE k = CLASS_OF(recv);
        while (k) {
            if (BUILTIN_TYPE(k) == T_ICLASS && RBASIC(k)->klass == klass) {
                return RCLASS_SUPER(k);
            }
            k = RCLASS_SUPER(k);
        }
        return rb_cObject;
    }
    else {
        rb_bug("vm_search_normal_superclass: should not reach here");
    }
}

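/* Determine the method id and the class from which lookup should continue
 * for a `super` call.  When `super` appears inside a block, the enclosing
 * method frame is located by walking the control-frame and dfp chains; an
 * implicit-argument `super` from such a block raises a RuntimeError. */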
static void
vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *ip,
                     VALUE recv, VALUE sigval,
                     ID *idp, VALUE *klassp)
{
    ID id;
    VALUE klass;

    while (ip && !ip->klass) {
        ip = ip->parent_iseq;
    }

    if (ip == 0) {
        rb_raise(rb_eNoMethodError, "super called outside of method");
    }

    id = ip->defined_method_id;

    if (ip != ip->local_iseq) {
        rb_control_frame_t *lcfp = GET_CFP();

        if (!sigval) {
            rb_raise(rb_eRuntimeError, "implicit argument passing of super from method defined by define_method() is not supported. Specify all arguments explicitly.");
        }

        while (lcfp->iseq != ip) {
            VALUE *tdfp = GET_PREV_DFP(lcfp->dfp);
            while (1) {
                lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
                if (lcfp->dfp == tdfp) {
                    break;
                }
            }
        }

        if (!lcfp->me) {
            rb_raise(rb_eNoMethodError, "super called outside of method");
        }

        id = lcfp->me->def->original_id;
        klass = vm_search_normal_superclass(lcfp->me->klass, recv);
    }
    else {
        klass = vm_search_normal_superclass(ip->klass, recv);
    }

    *idp = id;
    *klassp = klass;
}

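/* Implements the `throw` instruction used for non-local control transfer
 * (break, return, retry, and re-raising an exception or throw object).
 * For break/return it locates the target frame, checking for orphaned
 * blocks and lambda semantics, then packs the jump target and state into a
 * throw object that the catch tables dispatch on. */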
static VALUE
vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    int state = (int)(throw_state & 0xff);
    int flag = (int)(throw_state & 0x8000);
    rb_num_t level = throw_state >> 16;

    if (state != 0) {
        VALUE *pt = 0;
        if (flag != 0) {
            pt = (void *)1;
        }
        else {
            if (state == TAG_BREAK) {
                rb_control_frame_t *cfp = GET_CFP();
                VALUE *dfp = GET_DFP();
                int is_orphan = 1;
                rb_iseq_t *base_iseq = GET_ISEQ();

              search_parent:
                if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
                    if (cfp->iseq->type == ISEQ_TYPE_CLASS) {
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                        dfp = cfp->dfp;
                        goto search_parent;
                    }
                    dfp = GC_GUARDED_PTR_REF((VALUE *)*dfp);
                    base_iseq = base_iseq->parent_iseq;

                    while ((VALUE *)cfp < th->stack + th->stack_size) {
                        if (cfp->dfp == dfp) {
                            goto search_parent;
                        }
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                    }
                    rb_bug("VM (throw): can't find break base.");
                }

                if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
                    is_orphan = 0;
                    pt = cfp->dfp;
                    state = TAG_RETURN;
                }
                else {
                    dfp = GC_GUARDED_PTR_REF((VALUE *)*dfp);

                    while ((VALUE *)cfp < th->stack + th->stack_size) {
                        if (cfp->dfp == dfp) {
                            VALUE epc = cfp->pc - cfp->iseq->iseq_encoded;
                            rb_iseq_t *iseq = cfp->iseq;
                            int i;

                            for (i = 0; i < iseq->catch_table_size; i++) {
                                struct iseq_catch_table_entry *entry = &iseq->catch_table[i];

                                if (entry->type == CATCH_TYPE_BREAK &&
                                    entry->start < epc && entry->end >= epc) {
                                    if (entry->cont == epc) {
                                        goto found;
                                    }
                                    else {
                                        break;
                                    }
                                }
                            }
                            break;

                          found:
                            pt = dfp;
                            is_orphan = 0;
                            break;
                        }
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                    }
                }

                if (is_orphan) {
                    rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
                }
            }
            else if (state == TAG_RETRY) {
                rb_num_t i;
                pt = GC_GUARDED_PTR_REF((VALUE *)*GET_DFP());
                for (i = 0; i < level; i++) {
                    pt = GC_GUARDED_PTR_REF((VALUE *)*pt);
                }
            }
            else if (state == TAG_RETURN) {
                rb_control_frame_t *cfp = GET_CFP();
                VALUE *dfp = GET_DFP();
                VALUE *lfp = GET_LFP();

                while ((VALUE *)cfp < th->stack + th->stack_size) {
                    if (!lfp) {
                        lfp = cfp->lfp;
                    }
                    if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_CLASS) {
                        lfp = 0;
                    }

                    if (cfp->lfp == lfp) {
                        if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
                            VALUE *tdfp = dfp;

                            while (lfp != tdfp) {
                                if (cfp->dfp == tdfp) {
                                    dfp = cfp->dfp;
                                    goto valid_return;
                                }
                                tdfp = GC_GUARDED_PTR_REF((VALUE *)*tdfp);
                            }
                        }
                    }

                    if (cfp->dfp == lfp && cfp->iseq->type == ISEQ_TYPE_METHOD) {
                        dfp = lfp;
                        goto valid_return;
                    }

                    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                }

                rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

              valid_return:
                pt = dfp;
            }
            else {
                rb_bug("insns(throw): unsupported throw type");
            }
        }
        th->state = state;
        return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE)pt, state);
    }
    else {
        VALUE err = throwobj;

        if (FIXNUM_P(err)) {
            th->state = FIX2INT(err);
        }
        else if (SYMBOL_P(err)) {
            th->state = TAG_THROW;
        }
        else if (BUILTIN_TYPE(err) == T_NODE) {
            th->state = GET_THROWOBJ_STATE(err);
        }
        else {
            th->state = TAG_RAISE;
        }
        return err;
    }
}

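/* Implements the `expandarray` instruction used by multiple assignment:
 * expands an array onto the stack as `num` values, padding with nil when
 * the array is too short and optionally adding a rest array (flag 0x01);
 * the 0x02 flag selects expansion from the array's tail for trailing
 * targets. */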
static inline void
vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    rb_num_t space_size = num + is_splat;
    VALUE *base = cfp->sp, *ptr;
    volatile VALUE tmp_ary;
    rb_num_t len;

    if (TYPE(ary) != T_ARRAY) {
        ary = rb_ary_to_ary(ary);
    }

    cfp->sp += space_size;

    tmp_ary = ary;
    ptr = RARRAY_PTR(ary);
    len = (rb_num_t)RARRAY_LEN(ary);

    if (flag & 0x02) {
        rb_num_t i = 0, j;

        if (len < num) {
            for (i = 0; i < num - len; i++) {
                *base++ = Qnil;
            }
        }
        for (j = 0; i < num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *base++ = v;
        }
        if (is_splat) {
            *base = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        rb_num_t i;
        VALUE *bptr = &base[space_size - 1];

        for (i = 0; i < num; i++) {
            if (len <= i) {
                for (; i < num; i++) {
                    *bptr-- = Qnil;
                }
                break;
            }
            *bptr-- = ptr[i];
        }
        if (is_splat) {
            if (num > len) {
                *bptr = rb_ary_new();
            }
            else {
                *bptr = rb_ary_new4(len - num, ptr + num);
            }
        }
    }
}

static inline int
check_cfunc(const rb_method_entry_t *me, VALUE (*func)())
{
    if (me && me->def->type == VM_METHOD_TYPE_CFUNC &&
        me->def->body.cfunc.func == func) {
        return 1;
    }
    else {
        return 0;
    }
}

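/* Fast path for the opt_eq instruction: compares Fixnums, Floats and
 * Strings directly as long as `==` has not been redefined for them, and
 * short-circuits to pointer identity when the receiver's `==` is the
 * default rb_obj_equal.  Returns Qundef to fall back to a normal method
 * call. */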
static
#ifndef NO_BIG_INLINE
inline
#endif
VALUE
opt_eq_func(VALUE recv, VALUE obj, IC ic)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
        return (recv == obj) ? Qtrue : Qfalse;
    }
    else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
        if (HEAP_CLASS_OF(recv) == rb_cFloat &&
            HEAP_CLASS_OF(obj) == rb_cFloat &&
            BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
            double a = RFLOAT_VALUE(recv);
            double b = RFLOAT_VALUE(obj);

            if (isnan(a) || isnan(b)) {
                return Qfalse;
            }
            return (a == b) ? Qtrue : Qfalse;
        }
        else if (HEAP_CLASS_OF(recv) == rb_cString &&
                 HEAP_CLASS_OF(obj) == rb_cString &&
                 BASIC_OP_UNREDEFINED_P(BOP_EQ)) {
            return rb_str_equal(recv, obj);
        }
    }

    {
        const rb_method_entry_t *me = vm_method_search(idEq, CLASS_OF(recv), ic);
        extern VALUE rb_obj_equal(VALUE obj1, VALUE obj2);

        if (check_cfunc(me, rb_obj_equal)) {
            return recv == obj ? Qtrue : Qfalse;
        }
    }

    return Qundef;
}

struct opt_case_dispatch_i_arg {
    VALUE obj;
    int label;
};

static int
opt_case_dispatch_i(st_data_t key, st_data_t data, st_data_t p)
{
    struct opt_case_dispatch_i_arg *arg = (void *)p;

    if (RTEST(rb_funcall((VALUE)key, idEqq, 1, arg->obj))) {
        arg->label = FIX2INT((VALUE)data);
        return ST_STOP;
    }
    else {
        return ST_CONTINUE;
    }
}