Ruby 2.0.0p247 (2013-06-27 revision 41674)
vm.c
Go to the documentation of this file.
00001 /**********************************************************************
00002 
00003   vm.c -
00004 
00005   $Author: nagachika $
00006 
00007   Copyright (C) 2004-2007 Koichi Sasada
00008 
00009 **********************************************************************/
00010 
00011 #include "ruby/ruby.h"
00012 #include "ruby/vm.h"
00013 #include "ruby/st.h"
00014 #include "ruby/encoding.h"
00015 #include "internal.h"
00016 
00017 #include "gc.h"
00018 #include "vm_core.h"
00019 #include "iseq.h"
00020 #include "eval_intern.h"
00021 #include "probes.h"
00022 #include "probes_helper.h"
00023 
00024 static inline VALUE *
00025 VM_EP_LEP(VALUE *ep)
00026 {
00027     while (1) {
00028         if (VM_EP_LEP_P(ep)) {
00029             return ep;
00030         }
00031         ep = VM_EP_PREV_EP(ep);
00032     }
00033 }
00034 
00035 VALUE *
00036 rb_vm_ep_local_ep(VALUE *ep)
00037 {
00038     return VM_EP_LEP(ep);
00039 }
00040 
00041 static inline VALUE *
00042 VM_CF_LEP(rb_control_frame_t *cfp)
00043 {
00044     return VM_EP_LEP(cfp->ep);
00045 }
00046 
00047 static inline VALUE *
00048 VM_CF_PREV_EP(rb_control_frame_t * cfp)
00049 {
00050     return VM_EP_PREV_EP((cfp)->ep);
00051 }
00052 
00053 static inline rb_block_t *
00054 VM_CF_BLOCK_PTR(rb_control_frame_t *cfp)
00055 {
00056     VALUE *ep = VM_CF_LEP(cfp);
00057     return VM_EP_BLOCK_PTR(ep);
00058 }
00059 
00060 rb_block_t *
00061 rb_vm_control_frame_block_ptr(rb_control_frame_t *cfp)
00062 {
00063     return VM_CF_BLOCK_PTR(cfp);
00064 }
00065 
00066 #if VM_COLLECT_USAGE_DETAILS
00067 static void vm_collect_usage_operand(int insn, int n, VALUE op);
00068 static void vm_collect_usage_insn(int insn);
00069 static void vm_collect_usage_register(int reg, int isset);
00070 #endif
00071 
00072 static VALUE
00073 vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self, VALUE defined_class,
00074                int argc, const VALUE *argv, const rb_block_t *blockptr);
00075 
00076 #include "vm_insnhelper.h"
00077 #include "vm_insnhelper.c"
00078 #include "vm_exec.h"
00079 #include "vm_exec.c"
00080 
00081 #include "vm_method.c"
00082 #include "vm_eval.c"
00083 
00084 #include <assert.h>
00085 
00086 #define BUFSIZE 0x100
00087 #define PROCDEBUG 0
00088 
VALUE rb_cRubyVM;               /* RubyVM class */
VALUE rb_cThread;               /* Thread class */
VALUE rb_cEnv;                  /* internal class wrapping heap envs (rb_env_t) */
VALUE rb_mRubyVMFrozenCore;     /* RubyVM::FrozenCore */

VALUE ruby_vm_const_missing_count = 0;  /* counter bumped per const_missing */
char ruby_vm_redefined_flag[BOP_LAST_]; /* per-basic-operation redefinition flags */
rb_thread_t *ruby_current_thread = 0;   /* currently running thread */
rb_vm_t *ruby_current_vm = 0;           /* the VM instance */
rb_event_flag_t ruby_vm_event_flags;    /* enabled trace-event mask */
00099 
00100 static void thread_free(void *ptr);
00101 
/* Bump the global VM state version (invalidates caches keyed on it). */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
00107 
00108 static void vm_clear_global_method_cache(void);
00109 
/* Intended to clear every inline method-cache entry in every iseq.
 * Currently a no-op; see the TODO below. */
static void
vm_clear_all_inline_method_cache(void)
{
    /* TODO: Clear all inline cache entries in all iseqs.
             How to iterate all iseqs in sweep phase?
             rb_objspace_each_objects() doesn't work at sweep phase.
     */
}
00118 
00119 static void
00120 vm_clear_all_cache()
00121 {
00122     vm_clear_global_method_cache();
00123     vm_clear_all_inline_method_cache();
00124     ruby_vm_global_state_version = 1;
00125 }
00126 
00127 void
00128 rb_vm_inc_const_missing_count(void)
00129 {
00130     ruby_vm_const_missing_count +=1;
00131 }
00132 
00133 /* control stack frame */
00134 
/* Push a TOP frame executing iseqval onto th's control-frame stack.
 * iseqval must wrap an ISEQ_TYPE_TOP instruction sequence; raises
 * TypeError otherwise. */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* for return */
    CHECK_VM_STACK_OVERFLOW(th->cfp, iseq->local_size + iseq->stack_max);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
                  th->top_self, rb_cObject, VM_ENVVAL_BLOCK_PTR(0),
                  iseq->iseq_encoded, th->cfp->sp, iseq->local_size, 0);
}
00151 
/* Push an EVAL frame executing iseqval in the scope of base_block
 * (its self/klass and env chain). When cref is non-NULL it is stored at
 * ep[-1] of the new frame so lexical-scope lookups use it. */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref, rb_block_t *base_block)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    CHECK_VM_STACK_OVERFLOW(th->cfp, iseq->local_size + iseq->stack_max);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
                  base_block->self, base_block->klass,
                  VM_ENVVAL_PREV_EP_PTR(base_block->ep), iseq->iseq_encoded,
                  th->cfp->sp, iseq->local_size, 0);

    if (cref) {
        th->cfp->ep[-1] = (VALUE)cref; /* install caller's cref */
    }
}
00168 
/* Push the main-script frame, evaluating iseqval in the scope of
 * TOPLEVEL_BINDING's env. Afterwards the binding's env is re-created so
 * locals introduced by the main iseq remain visible through it. */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    vm_set_eval_stack(th, iseqval, 0, &env->block);

    /* save binding */
    GetISeqPtr(iseqval, iseq);
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }
}
00187 
00188 rb_control_frame_t *
00189 rb_vm_get_binding_creatable_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp)
00190 {
00191     while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00192         if (cfp->iseq) {
00193             return (rb_control_frame_t *)cfp;
00194         }
00195         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00196     }
00197     return 0;
00198 }
00199 
00200 rb_control_frame_t *
00201 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp)
00202 {
00203     while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00204         if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00205             return (rb_control_frame_t *)cfp;
00206         }
00207         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00208     }
00209     return 0;
00210 }
00211 
/* Like rb_vm_get_ruby_level_next_cfp, but once past the first frame the
 * walk only continues through frames flagged VM_FRAME_FLAG_PASSED;
 * otherwise it stops and returns 0. */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        /* stop unless this frame was "passed" through to its callee */
        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
00233 
00234 /* at exit */
00235 
/* Register func to run at VM teardown. The hook list is the VM-embedded
 * at_exit array (&vm->at_exit treated as a VALUE); the function pointer
 * is smuggled into the array as a VALUE. */
void
ruby_vm_at_exit(void (*func)(rb_vm_t *))
{
    rb_ary_push((VALUE)&GET_VM()->at_exit, (VALUE)func);
}
00241 
/* Pop and invoke each registered at-exit hook (LIFO order), then free
 * the embedded hooks array. Entries are function pointers stored as
 * VALUEs by ruby_vm_at_exit(). */
static void
ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
{
    VALUE hook = (VALUE)&vm->at_exit;

    while (RARRAY_LEN(hook) > 0) {
        typedef void rb_vm_at_exit_func(rb_vm_t*);
        rb_vm_at_exit_func *func = (rb_vm_at_exit_func*)rb_ary_pop(hook);
        (*func)(vm);
    }
    rb_ary_free(hook);
}
00254 
00255 /* Env */
00256 
00257 /*
00258   env{
00259     env[0] // special (block or prev env)
00260     env[1] // env object
00261   };
00262  */
00263 
/* True when env does NOT point into th's VM stack, i.e. the env has
 * already been moved to the heap. */
#define ENV_IN_HEAP_P(th, env)  \
  (!((th)->stack <= (env) && (env) < ((th)->stack + (th)->stack_size)))
/* Env object (VALUE) slot of a heap env; see the layout comment above. */
#define ENV_VAL(env)        ((env)[1])
00267 
/* GC mark callback for VM/env objects: marks the copied locals array,
 * the previous env, and the embedded block's referents. */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* TODO: should mark more restricted range */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            /* block.iseq may hold either an iseq or a T_NODE */
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
00297 
00298 static void
00299 env_free(void * const ptr)
00300 {
00301     RUBY_FREE_ENTER("env");
00302     if (ptr) {
00303         rb_env_t *const env = ptr;
00304         RUBY_FREE_UNLESS_NULL(env->env);
00305         ruby_xfree(ptr);
00306     }
00307     RUBY_FREE_LEAVE("env");
00308 }
00309 
00310 static size_t
00311 env_memsize(const void *ptr)
00312 {
00313     if (ptr) {
00314         const rb_env_t * const env = ptr;
00315         size_t size = sizeof(rb_env_t);
00316         if (env->env) {
00317             size += env->env_size * sizeof(VALUE);
00318         }
00319         return size;
00320     }
00321     return 0;
00322 }
00323 
/* TypedData descriptor registered for VM/env objects. */
static const rb_data_type_t env_data_type = {
    "VM/env",
    {env_mark, env_free, env_memsize,},
};
00328 
00329 static VALUE
00330 env_alloc(void)
00331 {
00332     VALUE obj;
00333     rb_env_t *env;
00334     obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
00335     env->env = 0;
00336     env->prev_envval = 0;
00337     env->block.iseq = 0;
00338     return obj;
00339 }
00340 
00341 static VALUE check_env_value(VALUE envval);
00342 
/* Debug helper: dump env's envptr/envval/ep — and, recursively, its
 * prev-env chain — to stderr. Always returns 1 ("valid"). */
static int
check_env(rb_env_t * const env)
{
    fprintf(stderr, "---\n");
    fprintf(stderr, "envptr: %p\n", (void *)&env->block.ep[0]);
    fprintf(stderr, "envval: %10p ", (void *)env->block.ep[1]);
    dp(env->block.ep[1]);
    fprintf(stderr, "ep:    %10p\n", (void *)env->block.ep);
    if (env->prev_envval) {
        fprintf(stderr, ">>\n");
        check_env_value(env->prev_envval);
        fprintf(stderr, "<<\n");
    }
    return 1;
}
00358 
00359 static VALUE
00360 check_env_value(VALUE envval)
00361 {
00362     rb_env_t *env;
00363     GetEnvPtr(envval, env);
00364 
00365     if (check_env(env)) {
00366         return envval;
00367     }
00368     rb_bug("invalid env");
00369     return Qnil;                /* unreachable */
00370 }
00371 
/*
 * Move the environment at envptr (on th's VM stack) into a heap
 * rb_env_t, first recursively heapifying the previous envs up to endptr
 * (the local ep boundary). On return *envptr holds the env object so
 * the GC can see it, and cfp->ep is redirected into the heap copy.
 * Returns the env object (VALUE).
 */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        /* already heapified: return the stored env object */
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        /* heapify the previous env first */
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            /* locate the control frame that owns penvptr */
            while (pcfp->ep != penvptr) {
                pcfp++;
                if (pcfp->ep == 0) {
                    SDR();
                    rb_bug("invalid ep");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            *envptr = VM_ENVVAL_PREV_EP_PTR(pcfp->ep);
        }
    }

    /* allocate env */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2; /* no iseq: minimal env — TODO confirm slot meaning */
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    /* local_size+1 slots are copied below, plus one for the env object */
    env->env_size = local_size + 1 + 1;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        /* copy locals (and the special slot) off the VM stack */
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    *envptr = envval;           /* GC mark */
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;        /* frame self */

    /* reset ep in cfp */
    cfp->ep = nenvptr;

    /* as Binding */
    env->block.self = cfp->self;
    env->block.ep = cfp->ep;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* TODO */
        env->block.iseq = 0;
    }
    return envval;
}
00450 
00451 static int
00452 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00453 {
00454     int i;
00455     if (!iseq) return 0;
00456     for (i = 0; i < iseq->local_table_size; i++) {
00457         ID lid = iseq->local_table[i];
00458         if (rb_is_local_id(lid)) {
00459             rb_ary_push(ary, ID2SYM(lid));
00460         }
00461     }
00462     return 1;
00463 }
00464 
00465 static int
00466 collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
00467 {
00468 
00469     while (collect_local_variables_in_iseq(env->block.iseq, ary),
00470            env->prev_envval) {
00471         GetEnvPtr(env->prev_envval, env);
00472     }
00473     return 0;
00474 }
00475 
00476 static int
00477 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *ep, VALUE ary)
00478 {
00479     if (ENV_IN_HEAP_P(th, ep)) {
00480         rb_env_t *env;
00481         GetEnvPtr(ENV_VAL(ep), env);
00482         collect_local_variables_in_env(env, ary);
00483         return 1;
00484     }
00485     else {
00486         return 0;
00487     }
00488 }
00489 
00490 static void vm_rewrite_ep_in_errinfo(rb_thread_t *th);
00491 static VALUE vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block);
00492 static VALUE vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp, VALUE *blockprocptr);
00493 
/* Public wrapper: heapify cfp's env chain, discarding the block-proc
 * out-value that vm_make_env_object may produce. */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE blockprocval;
    return vm_make_env_object(th, cfp, &blockprocval);
}
00500 
/*
 * Heapify the env chain reachable from cfp up to its local ep. If the
 * local env carries a block, that block is first materialized as a Proc
 * and stored through *blockprocptr (left untouched when there is no
 * block) so the env keeps the Proc alive. Returns the heap env object.
 */
static VALUE
vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *blockprocptr)
{
    VALUE envval;
    VALUE *lep = VM_CF_LEP(cfp);
    rb_block_t *blockptr = VM_EP_BLOCK_PTR(lep);

    if (blockptr) {
        VALUE blockprocval = vm_make_proc_from_block(th, blockptr);
        rb_proc_t *p;
        GetProcPtr(blockprocval, p);
        /* point the env's block slot at the Proc's own block */
        lep[0] = VM_ENVVAL_BLOCK_PTR(&p->block);
        *blockprocptr = blockprocval;
    }

    envval = vm_make_env_each(th, cfp, cfp->ep, lep);
    vm_rewrite_ep_in_errinfo(th);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
00525 
/* After envs move to the heap, throw-objects stored as errinfo ($!) in
 * active rescue/ensure frames may still hold catch-point eps pointing
 * into the VM stack. Redirect those into the corresponding heap env. */
static void
vm_rewrite_ep_in_errinfo(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        /* rewrite ep in errinfo to point to heap */
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) &&
            (cfp->iseq->type == ISEQ_TYPE_RESCUE ||
             cfp->iseq->type == ISEQ_TYPE_ENSURE)) {
            VALUE errinfo = cfp->ep[-2]; /* #$! */
            if (RB_TYPE_P(errinfo, T_NODE)) {
                VALUE *escape_ep = GET_THROWOBJ_CATCH_POINT(errinfo);
                if (! ENV_IN_HEAP_P(th, escape_ep)) {
                    VALUE epval = *escape_ep;
                    /* the stack slot now holds the env object itself */
                    if (!SPECIAL_CONST_P(epval) && RBASIC(epval)->klass == rb_cEnv) {
                        rb_env_t *epenv;
                        GetEnvPtr(epval, epenv);
                        SET_THROWOBJ_CATCH_POINT(errinfo, (VALUE)(epenv->env + epenv->local_size));
                    }
                }
            }
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
00551 
00552 void
00553 rb_vm_stack_to_heap(rb_thread_t *th)
00554 {
00555     rb_control_frame_t *cfp = th->cfp;
00556     while ((cfp = rb_vm_get_binding_creatable_next_cfp(th, cfp)) != 0) {
00557         rb_vm_make_env_object(th, cfp);
00558         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00559     }
00560 }
00561 
00562 /* Proc */
00563 
00564 static VALUE
00565 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00566 {
00567     if (!block->proc) {
00568         block->proc = rb_vm_make_proc(th, block, rb_cProc);
00569     }
00570     return block->proc;
00571 }
00572 
/*
 * Build a Proc of class `klass` wrapping `block`. The envs of the
 * block's frame chain are moved to the heap first so the Proc survives
 * after the frame pops. rb_bug()s if block->proc already exists (use
 * vm_make_proc_from_block for the caching path).
 */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    envval = vm_make_env_object(th, cfp, &blockprocval);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.klass = block->klass;
    proc->block.ep = block->ep;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        /* block->ep must have left the VM stack via the heapify above */
        if (th->stack < block->ep && block->ep < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->ep");
        }
    }

    return procval;
}
00608 
00609 /* C -> Ruby: block */
00610 
/*
 * Invoke `block` from C with receiver `self` and argc/argv arguments.
 * Dispatch on block->iseq:
 *   - special const: empty block, returns nil;
 *   - T_NODE: run via vm_yield_with_cfunc;
 *   - otherwise: copy args to the value stack, run block/lambda
 *     argument setup, push a BLOCK (or LAMBDA) frame, execute vm_exec.
 * `blockptr` is the block passed on to the invoked block; `cref`, when
 * non-NULL, is installed at ep[-1] of the new frame.
 */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref,
                    VALUE defined_class)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        int type = block_proc_is_lambda(block->proc) ?
          VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        cfp = th->cfp;
        CHECK_VM_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        /* push the arguments onto the value stack */
        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        vm_push_frame(th, iseq, type | VM_FRAME_FLAG_FINISH,
                      self, defined_class,
                      VM_ENVVAL_PREV_EP_PTR(block->ep),
                      iseq->iseq_encoded + opt_pc,
                      cfp->sp + arg_size, iseq->local_size - arg_size,
                      th->passed_me);
        th->passed_me = 0; /* consumed by the frame just pushed */

        if (cref) {
            th->cfp->ep[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00654 
00655 static inline const rb_block_t *
00656 check_block(rb_thread_t *th)
00657 {
00658     const rb_block_t *blockptr = VM_CF_BLOCK_PTR(th->cfp);
00659 
00660     if (blockptr == 0) {
00661         rb_vm_localjump_error("no block given", Qnil, 0);
00662     }
00663 
00664     return blockptr;
00665 }
00666 
00667 static inline VALUE
00668 vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
00669 {
00670     const rb_block_t *blockptr = check_block(th);
00671     return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref,
00672                                blockptr->klass);
00673 }
00674 
00675 static inline VALUE
00676 vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
00677 {
00678     const rb_block_t *blockptr = check_block(th);
00679     return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0,
00680                                blockptr->klass);
00681 }
00682 
/*
 * Invoke proc with receiver self/defined_class inside an EXEC_TAG so
 * th->safe_level is restored even on a non-local exit. A proc not
 * created from a method runs at its own recorded safe level. Any
 * captured jump state is re-thrown after safe_level is restored.
 */
static VALUE
vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self, VALUE defined_class,
               int argc, const VALUE *argv, const rb_block_t *blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0,
                                  defined_class);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state); /* propagate the non-local exit */
    }
    return val;
}
00710 
00711 VALUE
00712 rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
00713                   int argc, const VALUE *argv, const rb_block_t *blockptr)
00714 {
00715     return vm_invoke_proc(th, proc, proc->block.self, proc->block.klass,
00716                           argc, argv, blockptr);
00717 }
00718 
00719 /* special variable */
00720 
/* Skip outward past frames whose pc is 0 to the nearest frame with a
 * valid pc; returns 0 if the control-frame stack is exhausted first. */
static rb_control_frame_t *
vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (cfp->pc == 0) {
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
            return 0;
        }
    }
    return cfp;
}
00732 
00733 static VALUE
00734 vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
00735 {
00736     cfp = vm_normal_frame(th, cfp);
00737     return lep_svar_get(th, cfp ? VM_CF_LEP(cfp) : 0, key);
00738 }
00739 
00740 static void
00741 vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
00742 {
00743     cfp = vm_normal_frame(th, cfp);
00744     lep_svar_set(th, cfp ? VM_CF_LEP(cfp) : 0, key, val);
00745 }
00746 
00747 static VALUE
00748 vm_svar_get(VALUE key)
00749 {
00750     rb_thread_t *th = GET_THREAD();
00751     return vm_cfp_svar_get(th, th->cfp, key);
00752 }
00753 
00754 static void
00755 vm_svar_set(VALUE key, VALUE val)
00756 {
00757     rb_thread_t *th = GET_THREAD();
00758     vm_cfp_svar_set(th, th->cfp, key, val);
00759 }
00760 
/* Value of the backref special variable (svar slot 1). */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00766 
/* Set the backref special variable (svar slot 1). */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00772 
/* Value of the lastline special variable (svar slot 0). */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00778 
/* Set the lastline special variable (svar slot 0). */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00784 
00785 /* misc */
00786 
00787 VALUE
00788 rb_sourcefilename(void)
00789 {
00790     rb_thread_t *th = GET_THREAD();
00791     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00792 
00793     if (cfp) {
00794         return cfp->iseq->location.path;
00795     }
00796     else {
00797         return Qnil;
00798     }
00799 }
00800 
00801 const char *
00802 rb_sourcefile(void)
00803 {
00804     rb_thread_t *th = GET_THREAD();
00805     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00806 
00807     if (cfp) {
00808         return RSTRING_PTR(cfp->iseq->location.path);
00809     }
00810     else {
00811         return 0;
00812     }
00813 }
00814 
00815 int
00816 rb_sourceline(void)
00817 {
00818     rb_thread_t *th = GET_THREAD();
00819     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00820 
00821     if (cfp) {
00822         return rb_vm_get_sourceline(cfp);
00823     }
00824     else {
00825         return 0;
00826     }
00827 }
00828 
00829 NODE *
00830 rb_vm_cref(void)
00831 {
00832     rb_thread_t *th = GET_THREAD();
00833     rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00834 
00835     if (cfp == 0) {
00836         return NULL;
00837     }
00838     return rb_vm_get_cref(cfp->iseq, cfp->ep);
00839 }
00840 
#if 0
/* Disabled debug helper: dump each cref's class and visibility. */
void
debug_cref(NODE *cref)
{
    while (cref) {
        dp(cref->nd_clss);
        printf("%ld\n", cref->nd_visi);
        cref = cref->nd_next;
    }
}
#endif
00852 
/* cbase of the current Ruby-level frame; raises RuntimeError when no
 * Ruby-level frame exists (top of a Fiber or Thread). */
VALUE
rb_vm_cbase(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
    }
    return vm_get_cbase(cfp->iseq, cfp->ep);
}
00864 
00865 /* jump */
00866 
/* Build (but do not raise) a LocalJumpError with message mesg;
 * @exit_value is set to value and @reason to a symbol derived from the
 * jump tag `reason`. */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
00898 
00899 void
00900 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
00901 {
00902     VALUE exc = make_localjump_error(mesg, value, reason);
00903     rb_exc_raise(exc);
00904 }
00905 
/* Map a caught jump-tag state to a LocalJumpError exception object;
 * returns nil for state 0 and unmapped states. val == Qundef means
 * "take the value from the current tag's retval". */
VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
00937 
/* Raise the LocalJumpError mapped from state when one exists; otherwise
 * propagate the jump via JUMP_TAG. Does not return normally. */
void
rb_vm_jump_tag_but_local_jump(int state)
{
    VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
    if (!NIL_P(exc)) rb_exc_raise(exc);
    JUMP_TAG(state);
}
00945 
00946 NORETURN(static void vm_iter_break(rb_thread_t *th, VALUE val));
00947 
/* Implement `break` from C: throw TAG_BREAK carrying val, targeted at
 * the ep one level up from the current frame. Does not return. */
static void
vm_iter_break(rb_thread_t *th, VALUE val)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *ep = VM_CF_PREV_EP(cfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(val, (VALUE)ep, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
00958 
/* Public API: break out of the current iterator with nil. */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD(), Qnil);
}
00964 
/* Public API: break out of the current iterator, returning val. */
void
rb_iter_break_value(VALUE val)
{
    vm_iter_break(GET_THREAD(), val);
}
00970 
00971 /* optimization: redefine management */
00972 
/* Maps optimized (BOP) C method entries to their BOP id; consulted when
 * a method is redefined (see add_opt_method / rb_vm_check_redefinition_opt_method). */
static st_table *vm_opt_method_table = 0;
00974 
/* Map a class to its basic-operation redefinition flag bit; 0 when the
 * class has no tracked optimized operations. */
static int
vm_redefinition_check_flag(VALUE klass)
{
    if (klass == rb_cFixnum) return FIXNUM_REDEFINED_OP_FLAG;
    if (klass == rb_cFloat)  return FLOAT_REDEFINED_OP_FLAG;
    if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
    if (klass == rb_cArray)  return ARRAY_REDEFINED_OP_FLAG;
    if (klass == rb_cHash)   return HASH_REDEFINED_OP_FLAG;
    if (klass == rb_cBignum) return BIGNUM_REDEFINED_OP_FLAG;
    if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
    if (klass == rb_cTime)   return TIME_REDEFINED_OP_FLAG;
    return 0;
}
00988 
/* If me (undefined, or a C function) is registered as an optimized
 * basic operation, set klass's redefinition flag bit for that operation
 * so optimized instructions fall back to a real method call. */
static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
{
    st_data_t bop;
    if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
        if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
            int flag = vm_redefinition_check_flag(klass);

            ruby_vm_redefined_flag[bop] |= flag;
        }
    }
}
01001 
/* st_foreach callback: when mid now resolves on klass to a different
 * method entry than the recorded one, flag the recorded entry as a
 * redefined basic operation. */
static int
check_redefined_method(st_data_t key, st_data_t value, st_data_t data)
{
    ID mid = (ID)key;
    rb_method_entry_t *me = (rb_method_entry_t *)value;
    VALUE klass = (VALUE)data;
    rb_method_entry_t *newme = rb_method_entry(klass, mid, NULL);

    if (newme != me)
        rb_vm_check_redefinition_opt_method(me, me->klass);
    return ST_CONTINUE;
}
01014 
/* A prepend can change method resolution on klass: re-check every
 * method in the origin class's method table for redefined optimized
 * operations. No-op for classes with no tracked operations. */
void
rb_vm_check_redefinition_by_prepend(VALUE klass)
{
    if (!vm_redefinition_check_flag(klass)) return;
    st_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method,
               (st_data_t)klass);
}
01022 
/* Record klass#mid (which must currently be a C function) in
 * vm_opt_method_table under bop, so later redefinition can disable the
 * optimization; rb_bug if the method is missing or not a cfunc.
 * NOTE(review): (void *)&me passed where st_lookup expects st_data_t* —
 * assumes rb_method_entry_t* and st_data_t share representation;
 * confirm against st.h. */
static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    rb_method_entry_t *me;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
        me->def->type == VM_METHOD_TYPE_CFUNC) {
        st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
    }
    else {
        rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}
01035 
/* Register every optimized basic operation (+, -, ==, [], length, ...)
 * with the classes it is specialized for, and clear all redefinition
 * flags. Redefining one of these methods later re-sets its flag. */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

/* OP resets the flag for bop_; C registers class k's current method. */
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum), C(Float));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum), C(Float));
    OP(LE, LE), (C(Fixnum), C(Float));
    OP(GT, GT), (C(Fixnum), C(Float));
    OP(GE, GE), (C(Fixnum), C(Float));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
#undef C
#undef OP
}
01067 
01068 /* for vm development */
01069 
01070 #if VMDEBUG
01071 static const char *
01072 vm_frametype_name(const rb_control_frame_t *cfp)
01073 {
01074     switch (VM_FRAME_TYPE(cfp)) {
01075       case VM_FRAME_MAGIC_METHOD: return "method";
01076       case VM_FRAME_MAGIC_BLOCK:  return "block";
01077       case VM_FRAME_MAGIC_CLASS:  return "class";
01078       case VM_FRAME_MAGIC_TOP:    return "top";
01079       case VM_FRAME_MAGIC_CFUNC:  return "cfunc";
01080       case VM_FRAME_MAGIC_PROC:   return "proc";
01081       case VM_FRAME_MAGIC_IFUNC:  return "ifunc";
01082       case VM_FRAME_MAGIC_EVAL:   return "eval";
01083       case VM_FRAME_MAGIC_LAMBDA: return "lambda";
01084       default:
01085         rb_bug("unknown frame");
01086     }
01087 }
01088 #endif
01089 
01090 /* evaluator body */
01091 
01092 /*                  finish
01093   VMe (h1)          finish
01094     VM              finish F1 F2
01095       cfunc         finish F1 F2 C1
01096         rb_funcall  finish F1 F2 C1
01097           VMe       finish F1 F2 C1
01098             VM      finish F1 F2 C1 F3
01099 
01100   F1 - F3 : pushed by VM
01101   C1      : pushed by send insn (CFUNC)
01102 
01103   struct CONTROL_FRAME {
01104     VALUE *pc;                  // cfp[0], program counter
01105     VALUE *sp;                  // cfp[1], stack pointer
01106     VALUE *bp;                  // cfp[2], base pointer
01107     rb_iseq_t *iseq;            // cfp[3], iseq
01108     VALUE flag;                 // cfp[4], magic
01109     VALUE self;                 // cfp[5], self
01110     VALUE *ep;                  // cfp[6], env pointer
01111     rb_iseq_t * block_iseq;     // cfp[7], block iseq
01112     VALUE proc;                 // cfp[8], always 0
01113   };
01114 
01115   struct BLOCK {
01116     VALUE self;
01117     VALUE *ep;
01118     rb_iseq_t *block_iseq;
01119     VALUE proc;
01120   };
01121 
01122   struct METHOD_CONTROL_FRAME {
01123     rb_control_frame_t frame;
01124   };
01125 
01126   struct METHOD_FRAME {
01127     VALUE arg0;
01128     ...
01129     VALUE argM;
01130     VALUE param0;
01131     ...
01132     VALUE paramN;
01133     VALUE cref;
01134     VALUE special;                         // lep [1]
01135     struct block_object *block_ptr | 0x01; // lep [0]
01136   };
01137 
01138   struct BLOCK_CONTROL_FRAME {
01139     rb_control_frame_t frame;
01140   };
01141 
01142   struct BLOCK_FRAME {
01143     VALUE arg0;
01144     ...
01145     VALUE argM;
01146     VALUE param0;
01147     ...
01148     VALUE paramN;
01149     VALUE cref;
01150     VALUE *(prev_ptr | 0x01); // ep[0]
01151   };
01152 
01153   struct CLASS_CONTROL_FRAME {
01154     rb_control_frame_t frame;
01155   };
01156 
01157   struct CLASS_FRAME {
01158     VALUE param0;
01159     ...
01160     VALUE paramN;
01161     VALUE cref;
01162     VALUE prev_ep; // for frame jump
01163   };
01164 
01165   struct C_METHOD_CONTROL_FRAME {
01166     VALUE *pc;                       // 0
01167     VALUE *sp;                       // stack pointer
01168     VALUE *bp;                       // base pointer (used in exception)
01169     rb_iseq_t *iseq;                 // cmi
01170     VALUE magic;                     // C_METHOD_FRAME
01171     VALUE self;                      // ?
01172     VALUE *ep;                       // ep == lep
01173     rb_iseq_t * block_iseq;          //
01174     VALUE proc;                      // always 0
01175   };
01176 
01177   struct C_BLOCK_CONTROL_FRAME {
01178     VALUE *pc;                       // point only "finish" insn
01179     VALUE *sp;                       // sp
01180     rb_iseq_t *iseq;                 // ?
01181     VALUE magic;                     // C_METHOD_FRAME
01182     VALUE self;                      // needed?
01183     VALUE *ep;                       // ep
01184     rb_iseq_t * block_iseq; // 0
01185   };
01186  */
01187 
01188 
/* Main evaluator wrapper.
 *
 * Runs vm_exec_core() under a fresh tag and implements the non-local
 * control-flow protocol: when the core loop leaves with a throw state
 * (break / return / retry / redo / next / raise), this function searches
 * the current iseq's catch table and either restarts the core loop at a
 * continuation point, pushes a block frame to run a rescue/ensure iseq,
 * or keeps unwinding frames; anything not handled before the FINISH
 * frame is re-thrown to the outer tag via JUMP_TAG().
 *
 * Returns the resulting VALUE of the evaluation. */
static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;       /* seed for the core loop (stack-caching builds) */
    VALUE *escape_ep = NULL; /* ep of the frame a break/return targets */

    TH_PUSH_TAG(th);
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
        result = vm_exec_core(th, initial);
        if ((state = th->state) != 0) {
            /* the core loop returned a throw object instead of a value */
            err = result;
            th->state = 0;
            goto exception_handler;
        }
    }
    else {
        int i;
        struct iseq_catch_table_entry *entry;
        unsigned long epc, cont_pc, cont_sp;
        VALUE catch_iseqval;
        rb_control_frame_t *cfp;
        VALUE type;

        err = th->errinfo;

      exception_handler:
        cont_pc = cont_sp = catch_iseqval = 0;

        /* skip frames that have no pc/iseq to search; fire c-return
         * events for cfunc frames as they are abandoned */
        while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
            if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
                const rb_method_entry_t *me = th->cfp->me;
                EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass, Qnil);
                RUBY_DTRACE_METHOD_RETURN_HOOK(th, me->klass, me->called_id);
            }
            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
        }

        cfp = th->cfp;
        /* instruction offset where the throw/raise happened */
        epc = cfp->pc - cfp->iseq->iseq_encoded;

        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_ep = GET_THROWOBJ_CATCH_POINT(err);

            if (cfp->ep == escape_ep) {
                if (state == TAG_RETURN) {
                    if (!VM_FRAME_TYPE_FINISH_P(cfp)) {
                        /* not at the FINISH frame yet: keep unwinding,
                         * retargeted at the caller's ep as a break */
                        SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->ep);
                        SET_THROWOBJ_STATE(err, state = TAG_BREAK);
                    }
                    else {
                        /* reached the target frame: run any enclosing
                         * ensure clause before actually returning */
                        for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                            entry = &cfp->iseq->catch_table[i];
                            if (entry->start < epc && entry->end >= epc) {
                                if (entry->type == CATCH_TYPE_ENSURE) {
                                    catch_iseqval = entry->iseq;
                                    cont_pc = entry->cont;
                                    cont_sp = entry->sp;
                                    break;
                                }
                            }
                        }
                        if (!catch_iseqval) {
                            /* no ensure: the thrown value is the result */
                            result = GET_THROWOBJ_VAL(err);
                            th->errinfo = Qnil;
                            vm_pop_frame(th);
                            goto finish_vme;
                        }
                    }
                    /* through */
                }
                else {
                    /* TAG_BREAK */
#if OPT_STACK_CACHING
                    initial = (GET_THROWOBJ_VAL(err));
#else
                    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                    th->errinfo = Qnil;
                    goto vm_loop_start;
                }
            }
        }

        if (state == TAG_RAISE) {
            /* exception: both rescue and ensure entries apply */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        VALUE *escape_ep; /* NOTE: shadows the outer escape_ep */
                        escape_ep = GET_THROWOBJ_CATCH_POINT(err);
                        if (cfp->ep == escape_ep) {
                            /* restart at the head of the retried region */
                            cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                            th->errinfo = Qnil;
                            goto vm_loop_start;
                        }
                    }
                }
            }
        }
        else if (state == TAG_BREAK && ((VALUE)escape_ep & ~0x03) == 0) {
            type = CATCH_TYPE_BREAK;

          search_restart_point:
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        /* resume the core loop at the continuation point */
                        cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                        cfp->sp = vm_base_ptr(cfp) + entry->sp;

                        if (state != TAG_REDO) {
                            /* push the thrown value as the insn's result */
#if OPT_STACK_CACHING
                            initial = (GET_THROWOBJ_VAL(err));
#else
                            *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                        }
                        th->errinfo = Qnil;
                        th->state = 0;
                        goto vm_loop_start;
                    }
                }
            }
        }
        else if (state == TAG_REDO) {
            type = CATCH_TYPE_REDO;
            goto search_restart_point;
        }
        else if (state == TAG_NEXT) {
            type = CATCH_TYPE_NEXT;
            goto search_restart_point;
        }
        else {
            /* any other state: only ensure clauses run while unwinding */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseqval != 0) {
            /* found catch table */
            rb_iseq_t *catch_iseq;

            /* enter catch scope */
            GetISeqPtr(catch_iseqval, catch_iseq);
            cfp->sp = vm_base_ptr(cfp) + cont_sp;
            cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

            /* push block frame */
            cfp->sp[0] = err;
            vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
                          cfp->self, cfp->klass,
                          VM_ENVVAL_PREV_EP_PTR(cfp->ep),
                          catch_iseq->iseq_encoded,
                          cfp->sp + 1 /* push value */,
                          catch_iseq->local_size - 1,
                          cfp->me);

            state = 0;
            th->state = 0;
            th->errinfo = Qnil;
            goto vm_loop_start;
        }
        else {
            /* skip frame */

            /* fire the return/end event for the frame being abandoned */
            switch (VM_FRAME_TYPE(th->cfp)) {
              case VM_FRAME_MAGIC_METHOD:
                RUBY_DTRACE_METHOD_RETURN_HOOK(th, 0, 0);
                EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0, Qnil);
                break;
              case VM_FRAME_MAGIC_BLOCK:
                EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->cfp->self, 0, 0, Qnil);
                break;
              case VM_FRAME_MAGIC_CLASS:
                EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->cfp->self, 0, 0, Qnil);
                break;
            }

            if (VM_FRAME_TYPE_FINISH_P(th->cfp)) {
                /* cannot unwind further inside this vm_exec invocation:
                 * propagate the state to the enclosing tag */
                vm_pop_frame(th);
                th->errinfo = err;
                TH_POP_TAG2();
                JUMP_TAG(state);
            }
            else {
                th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
                goto exception_handler;
            }
        }
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
01425 
01426 /* misc */
01427 
01428 VALUE
01429 rb_iseq_eval(VALUE iseqval)
01430 {
01431     rb_thread_t *th = GET_THREAD();
01432     VALUE val;
01433 
01434     vm_set_top_stack(th, iseqval);
01435 
01436     val = vm_exec(th);
01437     RB_GC_GUARD(iseqval); /* prohibit tail call optimization */
01438     return val;
01439 }
01440 
01441 VALUE
01442 rb_iseq_eval_main(VALUE iseqval)
01443 {
01444     rb_thread_t *th = GET_THREAD();
01445     VALUE val;
01446 
01447     vm_set_main_stack(th, iseqval);
01448 
01449     val = vm_exec(th);
01450     RB_GC_GUARD(iseqval); /* prohibit tail call optimization */
01451     return val;
01452 }
01453 
01454 int
01455 rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, VALUE *klassp)
01456 {
01457     rb_iseq_t *iseq = cfp->iseq;
01458     if (!iseq && cfp->me) {
01459         if (idp) *idp = cfp->me->def->original_id;
01460         if (klassp) *klassp = cfp->me->klass;
01461         return 1;
01462     }
01463     while (iseq) {
01464         if (RUBY_VM_IFUNC_P(iseq)) {
01465             if (idp) CONST_ID(*idp, "<ifunc>");
01466             if (klassp) *klassp = 0;
01467             return 1;
01468         }
01469         if (iseq->defined_method_id) {
01470             if (idp) *idp = iseq->defined_method_id;
01471             if (klassp) *klassp = iseq->klass;
01472             return 1;
01473         }
01474         if (iseq->local_iseq == iseq) {
01475             break;
01476         }
01477         iseq = iseq->parent_iseq;
01478     }
01479     return 0;
01480 }
01481 
/* Like rb_vm_control_frame_id_and_class(), applied to the thread's
 * current control frame. */
int
rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp)
{
    return rb_vm_control_frame_id_and_class(th->cfp, idp, klassp);
}
01487 
/* Method id/defining class of the current thread's current frame. */
int
rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
{
    return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
}
01493 
/* Build a human-readable description of what `th` is executing:
 * "file:line:in `label'" for an iseq frame with a valid pc, or
 * "`Class#meth' (cfunc)" for a C function frame.  Returns Qnil when no
 * location information is available.
 * NOTE(review): the else-branch dereferences cfp->me without a NULL
 * check — presumably a frame without an iseq always carries a method
 * entry here; confirm against callers. */
VALUE
rb_thread_current_status(const rb_thread_t *th)
{
    const rb_control_frame_t *cfp = th->cfp;
    VALUE str = Qnil;

    if (cfp->iseq != 0) {
        if (cfp->pc != 0) {
            rb_iseq_t *iseq = cfp->iseq;
            int line_no = rb_vm_get_sourceline(cfp);
            char *file = RSTRING_PTR(iseq->location.path);
            str = rb_sprintf("%s:%d:in `%s'",
                             file, line_no, RSTRING_PTR(iseq->location.label));
        }
    }
    else if (cfp->me->def->original_id) {
        str = rb_sprintf("`%s#%s' (cfunc)",
                         rb_class2name(cfp->me->klass),
                         rb_id2name(cfp->me->def->original_id));
    }

    return str;
}
01517 
/* Call `func(arg)` under a freshly pushed TOP frame, with `recv` as self
 * and `blockptr` as the passed block, so the C code executes as if at
 * the top level of `filename`.  The frame is popped before returning
 * func's result. */
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
                 const rb_block_t *blockptr, VALUE filename)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    /* volatile: presumably keeps the iseq object reachable by the
     * conservative GC across the (*func)() call — confirm */
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
                  recv, CLASS_OF(recv), VM_ENVVAL_BLOCK_PTR(blockptr), 0, reg_cfp->sp, 1, 0);

    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}
01535 
01536 /* vm */
01537 
01538 static int
01539 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01540 {
01541     VALUE thval = (VALUE)key;
01542     rb_gc_mark(thval);
01543     return ST_CONTINUE;
01544 }
01545 
01546 void vm_trace_mark_event_hooks(rb_hook_list_t *hooks);
01547 
/* GC mark function for the VM object: marks every living thread, the
 * VM-global object references (load path, loaded features, top self,
 * coverages, special exceptions, ...), Signal.trap commands, defined
 * strings and the registered event hooks. */
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
        rb_vm_t *vm = ptr;
        if (vm->living_threads) {
            st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
        }
        RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
        RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
        RUBY_MARK_UNLESS_NULL(vm->load_path);
        RUBY_MARK_UNLESS_NULL(vm->load_path_snapshot);
        RUBY_MARK_UNLESS_NULL(vm->load_path_check_cache);
        RUBY_MARK_UNLESS_NULL(vm->expanded_load_path);
        RUBY_MARK_UNLESS_NULL(vm->loaded_features);
        RUBY_MARK_UNLESS_NULL(vm->loaded_features_snapshot);
        RUBY_MARK_UNLESS_NULL(vm->top_self);
        RUBY_MARK_UNLESS_NULL(vm->coverages);
        rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

        if (vm->loading_table) {
            rb_mark_tbl(vm->loading_table);
        }
        if (vm->loaded_features_index) {
            rb_mark_tbl(vm->loaded_features_index);
        }

        vm_trace_mark_event_hooks(&vm->event_hooks);

        /* commands registered via Signal.trap */
        for (i = 0; i < RUBY_NSIG; i++) {
            if (vm->trap_list[i].cmd)
                rb_gc_mark(vm->trap_list[i].cmd);
        }
        if (vm->defined_strings) {
            rb_gc_mark_locations(vm->defined_strings, vm->defined_strings + DEFINED_EXPR);
        }
    }

    RUBY_MARK_LEAVE("vm");
}
01592 
01593 #define vm_free 0
01594 
/* Tear down a whole VM: free the main thread, the living-threads table
 * and (when the VM owns one) its object space, run the registered
 * at-exit hooks, destroy the GVL and release the VM struct itself.
 * Always returns 0. */
int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");
    if (vm) {
        rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        struct rb_objspace *objspace = vm->objspace;
#endif
        rb_gc_force_recycle(vm->self);
        /* clear main_thread first: thread_free() keeps the struct of the
         * thread it still believes to be the main thread */
        vm->main_thread = 0;
        if (th) {
            rb_fiber_reset_root_local_storage(th->self);
            thread_free(th);
        }
        if (vm->living_threads) {
            st_free_table(vm->living_threads);
            vm->living_threads = 0;
        }
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        if (objspace) {
            rb_objspace_free(objspace);
        }
#endif
        ruby_vm_run_at_exit_hooks(vm);
        rb_vm_gvl_destroy(vm);
        ruby_xfree(vm);
        ruby_current_vm = 0;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01627 
01628 static size_t
01629 vm_memsize(const void *ptr)
01630 {
01631     if (ptr) {
01632         const rb_vm_t *vmobj = ptr;
01633         size_t size = sizeof(rb_vm_t);
01634         size += st_memsize(vmobj->living_threads);
01635         if (vmobj->defined_strings) {
01636             size += DEFINED_EXPR * sizeof(VALUE);
01637         }
01638         return size;
01639     }
01640     else {
01641         return 0;
01642     }
01643 }
01644 
/* TypedData callbacks for the VM object: marked by rb_vm_mark, sized by
 * vm_memsize; dfree is 0 (vm_free) because the VM struct is released
 * explicitly via ruby_vm_destruct(). */
static const rb_data_type_t vm_data_type = {
    "VM",
    {rb_vm_mark, vm_free, vm_memsize,},
};
01649 
01650 
/* Build a frozen Hash of the VM's default stack-size parameters, keyed
 * by parameter name as a Symbol. */
static VALUE
vm_default_params(void)
{
    rb_vm_t *vm = GET_VM();
    VALUE result = rb_hash_new();
#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
    SET(thread_vm_stack_size);
    SET(thread_machine_stack_size);
    SET(fiber_vm_stack_size);
    SET(fiber_machine_stack_size);
#undef SET
    rb_obj_freeze(result);
    return result;
}
01665 
01666 static size_t
01667 get_param(const char *name, size_t default_value, size_t min_value)
01668 {
01669     const char *envval;
01670     size_t result = default_value;
01671     if ((envval = getenv(name)) != 0) {
01672         long val = atol(envval);
01673         if (val < (long)min_value) {
01674             val = (long)min_value;
01675         }
01676         result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
01677     }
01678     if (0) fprintf(stderr, "%s: %"PRIdSIZE"\n", name, result); /* debug print */
01679 
01680     return result;
01681 }
01682 
/* Apply platform constraints to a requested machine stack size:
 * Symbian is forced to a fixed 64KB, and anything below the pthread
 * minimum is raised to twice PTHREAD_STACK_MIN. */
static void
check_machine_stack_size(size_t *sizep)
{
#ifdef PTHREAD_STACK_MIN
    size_t size = *sizep;
#endif

#ifdef __SYMBIAN32__
    *sizep = 64 * 1024; /* 64KB: Let's be slightly more frugal on mobile platform */
#endif

#ifdef PTHREAD_STACK_MIN
    if (size < PTHREAD_STACK_MIN) {
        /* double the minimum to leave headroom above the bare floor */
        *sizep = PTHREAD_STACK_MIN * 2;
    }
#endif
}
01700 
/* Fill vm->default_params from the RUBY_*_STACK_SIZE environment
 * variables, falling back to the compile-time defaults and enforcing
 * the compile-time minimums, then apply platform-specific machine-stack
 * constraints. */
static void
vm_default_params_setup(rb_vm_t *vm)
{
    vm->default_params.thread_vm_stack_size =
      get_param("RUBY_THREAD_VM_STACK_SIZE",
                RUBY_VM_THREAD_VM_STACK_SIZE,
                RUBY_VM_THREAD_VM_STACK_SIZE_MIN);

    vm->default_params.thread_machine_stack_size =
      get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
                RUBY_VM_THREAD_MACHINE_STACK_SIZE,
                RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);

    vm->default_params.fiber_vm_stack_size =
      get_param("RUBY_FIBER_VM_STACK_SIZE",
                RUBY_VM_FIBER_VM_STACK_SIZE,
                RUBY_VM_FIBER_VM_STACK_SIZE_MIN);

    vm->default_params.fiber_machine_stack_size =
      get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
                RUBY_VM_FIBER_MACHINE_STACK_SIZE,
                RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);

    /* environment dependent check */
    check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
    check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
}
01728 
/* Zero-initialize a VM structure and set its boot-time invariants:
 * source encoding unset (-1), the at_exit list header set up as an
 * empty embedded array (embed flag set, embedded-length bits cleared so
 * len is 0), and the default stack-size parameters loaded. */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
    vm->at_exit.basic.flags = (T_ARRAY | RARRAY_EMBED_FLAG) & ~RARRAY_EMBED_LEN_MASK; /* len set 0 */
    vm->at_exit.basic.klass = 0;

    vm_default_params_setup(vm);
}
01739 
01740 /* Thread */
01741 
01742 #define USE_THREAD_DATA_RECYCLE 1
01743 
01744 #if USE_THREAD_DATA_RECYCLE
01745 #define RECYCLE_MAX 64
01746 static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
01747 static int thread_recycle_stack_count = 0;
01748 
01749 static VALUE *
01750 thread_recycle_stack(size_t size)
01751 {
01752     if (thread_recycle_stack_count) {
01753         /* TODO: check stack size if stack sizes are variable */
01754         return thread_recycle_stack_slot[--thread_recycle_stack_count];
01755     }
01756     else {
01757         return ALLOC_N(VALUE, size);
01758     }
01759 }
01760 
01761 #else
01762 #define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
01763 #endif
01764 
/* Return a VM stack to the recycle pool, or free it outright when the
 * pool is full (or stack recycling is compiled out). */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
        thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
        return;
    }
#endif
    ruby_xfree(stack);
}
01776 
01777 #ifdef USE_THREAD_RECYCLE
01778 static rb_thread_t *
01779 thread_recycle_struct(void)
01780 {
01781     void *p = ALLOC_N(rb_thread_t, 1);
01782     memset(p, 0, sizeof(rb_thread_t));
01783     return p;
01784 }
01785 #endif
01786 
/* GC mark function for a thread: marks the live slice of its VM stack,
 * every control frame's proc/self/klass/iseq/method entry, the thread's
 * Ruby object references, and — for threads other than the current one —
 * the saved machine stack and machine registers. */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
        th = ptr;
        if (th->stack) {
            VALUE *p = th->stack;
            VALUE *sp = th->cfp->sp;
            rb_control_frame_t *cfp = th->cfp;
            rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

            /* mark the value stack up to the current sp ... */
            while (p < sp) {
                rb_gc_mark(*p++);
            }
            /* ... plus any extra slots flagged during argument setup */
            rb_gc_mark_locations(p, p + th->mark_stack_len);

            /* walk every control frame down to the stack's end */
            while (cfp != limit_cfp) {
                rb_iseq_t *iseq = cfp->iseq;
                rb_gc_mark(cfp->proc);
                rb_gc_mark(cfp->self);
                rb_gc_mark(cfp->klass);
                if (iseq) {
                    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
                }
                if (cfp->me) {
                    /* TODO: marking `me' can be more sophisticated way */
                    ((rb_method_entry_t *)cfp->me)->mark = 1;
                    rb_mark_method_entry(cfp->me);
                }
                cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            }
        }

        /* mark ruby objects */
        RUBY_MARK_UNLESS_NULL(th->first_proc);
        if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

        RUBY_MARK_UNLESS_NULL(th->thgroup);
        RUBY_MARK_UNLESS_NULL(th->value);
        RUBY_MARK_UNLESS_NULL(th->errinfo);
        RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
        RUBY_MARK_UNLESS_NULL(th->pending_interrupt_mask_stack);
        RUBY_MARK_UNLESS_NULL(th->root_svar);
        RUBY_MARK_UNLESS_NULL(th->top_self);
        RUBY_MARK_UNLESS_NULL(th->top_wrapper);
        RUBY_MARK_UNLESS_NULL(th->fiber);
        RUBY_MARK_UNLESS_NULL(th->root_fiber);
        RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
        RUBY_MARK_UNLESS_NULL(th->last_status);

        RUBY_MARK_UNLESS_NULL(th->locking_mutex);

        rb_mark_tbl(th->local_storage);

        /* the current thread's machine context is scanned elsewhere;
         * other threads' saved machine stacks/registers are marked here */
        if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
            rb_gc_mark_machine_stack(th);
            rb_gc_mark_locations((VALUE *)&th->machine_regs,
                                 (VALUE *)(&th->machine_regs) +
                                 sizeof(th->machine_regs) / sizeof(VALUE));
        }

        vm_trace_mark_event_hooks(&th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
01855 
01856 static void
01857 thread_free(void *ptr)
01858 {
01859     rb_thread_t *th;
01860     RUBY_FREE_ENTER("thread");
01861 
01862     if (ptr) {
01863         th = ptr;
01864 
01865         if (!th->root_fiber) {
01866             RUBY_FREE_UNLESS_NULL(th->stack);
01867         }
01868 
01869         if (th->locking_mutex != Qfalse) {
01870             rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
01871         }
01872         if (th->keeping_mutexes != NULL) {
01873             rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
01874         }
01875 
01876         if (th->local_storage) {
01877             st_free_table(th->local_storage);
01878         }
01879 
01880         if (th->vm && th->vm->main_thread == th) {
01881             RUBY_GC_INFO("main thread\n");
01882         }
01883         else {
01884 #ifdef USE_SIGALTSTACK
01885             if (th->altstack) {
01886                 free(th->altstack);
01887             }
01888 #endif
01889             ruby_xfree(ptr);
01890         }
01891         if (ruby_current_thread == th)
01892             ruby_current_thread = NULL;
01893     }
01894     RUBY_FREE_LEAVE("thread");
01895 }
01896 
01897 static size_t
01898 thread_memsize(const void *ptr)
01899 {
01900     if (ptr) {
01901         const rb_thread_t *th = ptr;
01902         size_t size = sizeof(rb_thread_t);
01903 
01904         if (!th->root_fiber) {
01905             size += th->stack_size * sizeof(VALUE);
01906         }
01907         if (th->local_storage) {
01908             size += st_memsize(th->local_storage);
01909         }
01910         return size;
01911     }
01912     else {
01913         return 0;
01914     }
01915 }
01916 
/* TypedData callbacks shared by all Thread objects. */
#define thread_data_type ruby_threadptr_data_type
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
        rb_thread_mark,
        thread_free,
        thread_memsize,
    },
};
01926 
01927 VALUE
01928 rb_obj_is_thread(VALUE obj)
01929 {
01930     if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
01931         return Qtrue;
01932     }
01933     else {
01934         return Qfalse;
01935     }
01936 }
01937 
/* Allocate a Thread object of class `klass` wrapping a (possibly
 * recycled) rb_thread_t.
 * NOTE(review): `obj` is declared volatile — presumably to keep the
 * value visible to the conservative GC; confirm. */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
01951 
/* Initialize the core execution state of thread `th` (wrapped by the
 * Thread object `self`): allocate its VM stack (and sigaltstack where
 * used), push the initial dummy TOP frame, and set the default field
 * values.  Requires th->vm to be set by the caller. */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
#ifdef USE_SIGALTSTACK
    /* altstack of main thread is reallocated in another place */
    th->altstack = malloc(rb_sigaltstack_size());
#endif
    /* th->stack_size is word number.
     * th->vm->default_params.thread_vm_stack_size is byte size.
     */
    th->stack_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    th->stack = thread_recycle_stack(th->stack_size);

    /* control frames grow downward from the top of the stack */
    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
                  Qnil /* dummy self */, Qnil /* dummy klass */, VM_ENVVAL_BLOCK_PTR(0), 0 /* dummy pc */, th->stack, 1, 0);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;
    th->waiting_fd = -1;
    th->root_svar = Qnil;

#if OPT_CALL_THREADED_CODE
    th->retval = Qundef;
#endif
}
01983 
01984 static VALUE
01985 ruby_thread_init(VALUE self)
01986 {
01987     rb_thread_t *th;
01988     rb_vm_t *vm = GET_THREAD()->vm;
01989     GetThreadPtr(self, th);
01990 
01991     th->vm = vm;
01992     th_init(th, self);
01993     rb_iv_set(self, "locals", rb_hash_new());
01994 
01995     th->top_wrapper = 0;
01996     th->top_self = rb_vm_top_self();
01997     th->root_svar = Qnil;
01998     return self;
01999 }
02000 
02001 VALUE
02002 rb_thread_alloc(VALUE klass)
02003 {
02004     VALUE self = thread_alloc(klass);
02005     ruby_thread_init(self);
02006     return self;
02007 }
02008 
/*
 * Define a method from a compiled iseq on the class named by +cref+
 * (or on obj's singleton class when +is_singleton+ is non-zero).
 * Visibility comes from the cref; module_function additionally defines a
 * public singleton-class copy.
 */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
                 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    /* an iseq already bound to a class must be cloned before rebinding */
    if (miseq->klass) {
        RB_GC_GUARD(iseqval) = rb_iseq_clone(iseqval, 0);
        GetISeqPtr(iseqval, miseq);
    }

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
        klass = rb_singleton_class(obj); /* class and frozen checked in this API */
        noex = NOEX_PUBLIC;
    }

    /* dup */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->cref_stack->nd_visi = NOEX_PUBLIC;
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    /* module_function: also expose a public copy on the singleton class */
    if (!is_singleton && noex == NOEX_MODFUNC) {
        rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
02044 
/* Evaluate +expr+ with the current control frame temporarily popped
 * (cfp++ moves to the caller's frame, cfp-- restores it afterwards).
 * Used by the FrozenCore methods so helpers like rb_vm_cref() see the
 * caller's frame rather than the core method's own frame. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
02049 
02050 static VALUE
02051 m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
02052 {
02053     REWIND_CFP({
02054         vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
02055     });
02056     return Qnil;
02057 }
02058 
02059 static VALUE
02060 m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
02061 {
02062     REWIND_CFP({
02063         vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
02064     });
02065     return Qnil;
02066 }
02067 
02068 static VALUE
02069 m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
02070 {
02071     REWIND_CFP({
02072         rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
02073     });
02074     return Qnil;
02075 }
02076 
02077 static VALUE
02078 m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
02079 {
02080     REWIND_CFP({
02081         rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
02082     });
02083     return Qnil;
02084 }
02085 
02086 static VALUE
02087 m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
02088 {
02089     REWIND_CFP({
02090         rb_undef(cbase, SYM2ID(sym));
02091         INC_VM_STATE_VERSION();
02092     });
02093     return Qnil;
02094 }
02095 
/*
 * FrozenCore#core_set_postexe: register the block's iseq as an END-proc.
 * Builds a Proc from the caller's block slot and hands it to
 * rb_set_end_proc() to be run at interpreter shutdown.
 */
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
        rb_iseq_t *blockiseq;
        rb_block_t *blockptr;
        rb_thread_t *th = GET_THREAD();
        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
        VALUE proc;

        if (cfp == 0) {
            rb_bug("m_core_set_postexe: unreachable");
        }

        GetISeqPtr(iseqval, blockiseq);

        /* splice the iseq into the frame's block slot, then materialize
         * a Proc from it (proc = 0 forces rb_vm_make_proc to create one) */
        blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        blockptr->iseq = blockiseq;
        blockptr->proc = 0;

        proc = rb_vm_make_proc(th, blockptr, rb_cProc);
        rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
02121 
02122 static VALUE
02123 m_core_hash_from_ary(VALUE self, VALUE ary)
02124 {
02125     VALUE hash = rb_hash_new();
02126     int i;
02127 
02128     if (RUBY_DTRACE_HASH_CREATE_ENABLED()) {
02129         RUBY_DTRACE_HASH_CREATE(RARRAY_LEN(ary), rb_sourcefile(), rb_sourceline());
02130     }
02131 
02132     assert(RARRAY_LEN(ary) % 2 == 0);
02133     for (i=0; i<RARRAY_LEN(ary); i+=2) {
02134         rb_hash_aset(hash, RARRAY_PTR(ary)[i], RARRAY_PTR(ary)[i+1]);
02135     }
02136 
02137     return hash;
02138 }
02139 
02140 static VALUE
02141 m_core_hash_merge_ary(VALUE self, VALUE hash, VALUE ary)
02142 {
02143     int i;
02144 
02145     assert(RARRAY_LEN(ary) % 2 == 0);
02146     for (i=0; i<RARRAY_LEN(ary); i+=2) {
02147         rb_hash_aset(hash, RARRAY_PTR(ary)[i], RARRAY_PTR(ary)[i+1]);
02148     }
02149 
02150     return hash;
02151 }
02152 
02153 static VALUE
02154 m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
02155 {
02156     int i;
02157     VALUE hash = argv[0];
02158 
02159     for (i=1; i<argc; i+=2) {
02160         rb_hash_aset(hash, argv[i], argv[i+1]);
02161     }
02162 
02163     return hash;
02164 }
02165 
02166 static int
02167 kwmerge_ii(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
02168 {
02169     if (existing) return ST_STOP;
02170     *value = arg;
02171     return ST_CONTINUE;
02172 }
02173 
02174 static int
02175 kwmerge_i(VALUE key, VALUE value, VALUE hash)
02176 {
02177     if (!SYMBOL_P(key)) Check_Type(key, T_SYMBOL);
02178     st_update(RHASH_TBL(hash), key, kwmerge_ii, (st_data_t)value);
02179     return ST_CONTINUE;
02180 }
02181 
02182 static VALUE
02183 m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
02184 {
02185     kw = rb_convert_type(kw, T_HASH, "Hash", "to_hash");
02186     rb_hash_foreach(kw, kwmerge_i, hash);
02187     return hash;
02188 }
02189 
02190 extern VALUE *rb_gc_stack_start;
02191 extern size_t rb_gc_stack_maxsize;
02192 #ifdef __ia64
02193 extern VALUE *rb_gc_register_stack_start;
02194 #endif
02195 
02196 /* debug functions */
02197 
/* :nodoc: */
/* RubyVM::SDR (VMDEBUG builds only): dump a VM bug report to stderr. */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}
02205 
/* :nodoc: */
/* RubyVM::NSDR (VMDEBUG builds only): return the native C backtrace of
 * the current thread as an Array of Strings.  Returns an empty Array
 * when backtrace(3) is unavailable. */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    /* static buffer: fine for a debug-only helper, but not thread-safe —
     * NOTE(review): concurrent callers would share this array */
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
        rb_memerror();
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms); /* OK */
#endif
    return ary;
}
02230 
02231 #if VM_COLLECT_USAGE_DETAILS
02232 static VALUE usage_analysis_insn_stop(VALUE self);
02233 static VALUE usage_analysis_operand_stop(VALUE self);
02234 static VALUE usage_analysis_register_stop(VALUE self);
02235 #endif
02236 
02237 void
02238 Init_VM(void)
02239 {
02240     VALUE opts;
02241     VALUE klass;
02242     VALUE fcore;
02243 
02244     /* ::RubyVM */
02245     rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02246     rb_undef_alloc_func(rb_cRubyVM);
02247     rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02248 
02249     /* FrozenCore (hidden) */
02250     fcore = rb_class_new(rb_cBasicObject);
02251     RBASIC(fcore)->flags = T_ICLASS;
02252     klass = rb_singleton_class(fcore);
02253     rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02254     rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02255     rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02256     rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02257     rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02258     rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
02259     rb_define_method_id(klass, id_core_hash_from_ary, m_core_hash_from_ary, 1);
02260     rb_define_method_id(klass, id_core_hash_merge_ary, m_core_hash_merge_ary, 2);
02261     rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
02262     rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
02263     rb_define_method_id(klass, idProc, rb_block_proc, 0);
02264     rb_define_method_id(klass, idLambda, rb_block_lambda, 0);
02265     rb_obj_freeze(fcore);
02266     rb_gc_register_mark_object(fcore);
02267     rb_mRubyVMFrozenCore = fcore;
02268 
02269     /* ::RubyVM::Env */
02270     rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02271     rb_undef_alloc_func(rb_cEnv);
02272     rb_undef_method(CLASS_OF(rb_cEnv), "new");
02273 
02274     /* ::Thread */
02275     rb_cThread = rb_define_class("Thread", rb_cObject);
02276     rb_undef_alloc_func(rb_cThread);
02277 
02278 #if VM_COLLECT_USAGE_DETAILS
02279     /* ::RubyVM::USAGE_ANALYSIS_* */
02280     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
02281     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
02282     rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
02283 
02284     rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
02285     rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
02286     rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
02287 #endif
02288 
02289     /* ::RubyVM::OPTS, which shows vm build options */
02290     rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02291 
02292 #if   OPT_DIRECT_THREADED_CODE
02293     rb_ary_push(opts, rb_str_new2("direct threaded code"));
02294 #elif OPT_TOKEN_THREADED_CODE
02295     rb_ary_push(opts, rb_str_new2("token threaded code"));
02296 #elif OPT_CALL_THREADED_CODE
02297     rb_ary_push(opts, rb_str_new2("call threaded code"));
02298 #endif
02299 
02300 #if OPT_STACK_CACHING
02301     rb_ary_push(opts, rb_str_new2("stack caching"));
02302 #endif
02303 #if OPT_OPERANDS_UNIFICATION
02304     rb_ary_push(opts, rb_str_new2("operands unification]"));
02305 #endif
02306 #if OPT_INSTRUCTIONS_UNIFICATION
02307     rb_ary_push(opts, rb_str_new2("instructions unification"));
02308 #endif
02309 #if OPT_INLINE_METHOD_CACHE
02310     rb_ary_push(opts, rb_str_new2("inline method cache"));
02311 #endif
02312 #if OPT_BLOCKINLINING
02313     rb_ary_push(opts, rb_str_new2("block inlining"));
02314 #endif
02315 
02316     /* ::RubyVM::INSTRUCTION_NAMES */
02317     rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02318 
02319     /* ::RubyVM::DEFAULT_PARAMS
02320      * This constant variable shows VM's default parameters.
02321      * Note that changing these values does not affect VM exection.
02322      * Specification is not stable and you should not depend on this value.
02323      * Of course, this constant is MRI specific.
02324      */
02325     rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
02326 
02327     /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
02328 #if VMDEBUG
02329     rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02330     rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02331 #else
02332     (void)sdr;
02333     (void)nsdr;
02334 #endif
02335 
02336     /* VM bootstrap: phase 2 */
02337     {
02338         rb_vm_t *vm = ruby_current_vm;
02339         rb_thread_t *th = GET_THREAD();
02340         VALUE filename = rb_str_new2("<main>");
02341         volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02342         volatile VALUE th_self;
02343         rb_iseq_t *iseq;
02344 
02345         /* create vm object */
02346         vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02347 
02348         /* create main thread */
02349         th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02350         rb_iv_set(th_self, "locals", rb_hash_new());
02351         vm->main_thread = th;
02352         vm->running_thread = th;
02353         th->vm = vm;
02354         th->top_wrapper = 0;
02355         th->top_self = rb_vm_top_self();
02356         rb_thread_set_current(th);
02357 
02358         vm->living_threads = st_init_numtable();
02359         st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02360 
02361         rb_gc_register_mark_object(iseqval);
02362         GetISeqPtr(iseqval, iseq);
02363         th->cfp->iseq = iseq;
02364         th->cfp->pc = iseq->iseq_encoded;
02365         th->cfp->self = th->top_self;
02366         th->cfp->klass = Qnil;
02367 
02368         /*
02369          * The Binding of the top level scope
02370          */
02371         rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02372     }
02373     vm_init_redefined_flag();
02374 
02375     /* vm_backtrace.c */
02376     Init_vm_backtrace();
02377     VM_PROFILE_ATEXIT();
02378 }
02379 
02380 void
02381 rb_vm_set_progname(VALUE filename)
02382 {
02383     rb_thread_t *th = GET_VM()->main_thread;
02384     rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
02385     --cfp;
02386     cfp->iseq->location.path = filename;
02387 }
02388 
02389 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02390 struct rb_objspace *rb_objspace_alloc(void);
02391 #endif
02392 
/*
 * VM bootstrap phase 1: allocate and zero the global VM and main-thread
 * structs before the object system exists.  Runs before Init_VM()
 * (phase 2), which later wraps these structs in Ruby objects.
 * Aborts the process on allocation failure (no exceptions exist yet).
 */
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
    rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
    if (!vm || !th) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);
    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th->vm = vm;  /* must precede th_init(): it reads th->vm->default_params */
    th_init(th, 0);
    ruby_thread_init_stack(th);
}
02417 
02418 /* top self */
02419 
02420 static VALUE
02421 main_to_s(VALUE obj)
02422 {
02423     return rb_str_new2("main");
02424 }
02425 
02426 VALUE
02427 rb_vm_top_self(void)
02428 {
02429     return GET_VM()->top_self;
02430 }
02431 
02432 void
02433 Init_top_self(void)
02434 {
02435     rb_vm_t *vm = GET_VM();
02436 
02437     vm->top_self = rb_obj_alloc(rb_cObject);
02438     rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
02439     rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
02440 
02441     /* initialize mark object array */
02442     vm->mark_object_ary = rb_ary_tmp_new(1);
02443 }
02444 
02445 VALUE *
02446 ruby_vm_verbose_ptr(rb_vm_t *vm)
02447 {
02448     return &vm->verbose;
02449 }
02450 
02451 VALUE *
02452 ruby_vm_debug_ptr(rb_vm_t *vm)
02453 {
02454     return &vm->debug;
02455 }
02456 
02457 VALUE *
02458 rb_ruby_verbose_ptr(void)
02459 {
02460     return ruby_vm_verbose_ptr(GET_VM());
02461 }
02462 
02463 VALUE *
02464 rb_ruby_debug_ptr(void)
02465 {
02466     return ruby_vm_debug_ptr(GET_VM());
02467 }
02468 
02469 /* iseq.c */
02470 VALUE insn_operand_intern(rb_iseq_t *iseq,
02471                           VALUE insn, int op_no, VALUE op,
02472                           int len, size_t pos, VALUE *pnop, VALUE child);
02473 
02474 #if VM_COLLECT_USAGE_DETAILS
02475 
/* Store directly into the Hash's underlying st table (bypasses the
 * rb_hash_aset() public API). */
#define HASH_ASET(h, k, v) st_insert(RHASH_TBL(h), (st_data_t)(k), (st_data_t)(v))
02477 
02478 /* uh = {
02479  *   insn(Fixnum) => ihash(Hash)
02480  * }
02481  * ihash = {
02482  *   -1(Fixnum) => count,      # insn usage
02483  *    0(Fixnum) => ophash,     # operand usage
02484  * }
02485  * ophash = {
02486  *   val(interned string) => count(Fixnum)
02487  * }
02488  */
02489 static void
02490 vm_analysis_insn(int insn)
02491 {
02492     ID usage_hash;
02493     ID bigram_hash;
02494     static int prev_insn = -1;
02495 
02496     VALUE uh;
02497     VALUE ihash;
02498     VALUE cv;
02499 
02500     CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
02501     CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
02502     uh = rb_const_get(rb_cRubyVM, usage_hash);
02503     if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
02504         ihash = rb_hash_new();
02505         HASH_ASET(uh, INT2FIX(insn), ihash);
02506     }
02507     if ((cv = rb_hash_aref(ihash, INT2FIX(-1))) == Qnil) {
02508         cv = INT2FIX(0);
02509     }
02510     HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
02511 
02512     /* calc bigram */
02513     if (prev_insn != -1) {
02514         VALUE bi;
02515         VALUE ary[2];
02516         VALUE cv;
02517 
02518         ary[0] = INT2FIX(prev_insn);
02519         ary[1] = INT2FIX(insn);
02520         bi = rb_ary_new4(2, &ary[0]);
02521 
02522         uh = rb_const_get(rb_cRubyVM, bigram_hash);
02523         if ((cv = rb_hash_aref(uh, bi)) == Qnil) {
02524             cv = INT2FIX(0);
02525         }
02526         HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
02527     }
02528     prev_insn = insn;
02529 }
02530 
02531 static void
02532 vm_analysis_operand(int insn, int n, VALUE op)
02533 {
02534     ID usage_hash;
02535 
02536     VALUE uh;
02537     VALUE ihash;
02538     VALUE ophash;
02539     VALUE valstr;
02540     VALUE cv;
02541 
02542     CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
02543 
02544     uh = rb_const_get(rb_cRubyVM, usage_hash);
02545     if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
02546         ihash = rb_hash_new();
02547         HASH_ASET(uh, INT2FIX(insn), ihash);
02548     }
02549     if ((ophash = rb_hash_aref(ihash, INT2FIX(n))) == Qnil) {
02550         ophash = rb_hash_new();
02551         HASH_ASET(ihash, INT2FIX(n), ophash);
02552     }
02553     /* intern */
02554     valstr = insn_operand_intern(GET_THREAD()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
02555 
02556     /* set count */
02557     if ((cv = rb_hash_aref(ophash, valstr)) == Qnil) {
02558         cv = INT2FIX(0);
02559     }
02560     HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
02561 }
02562 
02563 static void
02564 vm_analysis_register(int reg, int isset)
02565 {
02566     ID usage_hash;
02567     VALUE uh;
02568     VALUE valstr;
02569     static const char regstrs[][5] = {
02570         "pc",                   /* 0 */
02571         "sp",                   /* 1 */
02572         "ep",                   /* 2 */
02573         "cfp",                  /* 3 */
02574         "self",                 /* 4 */
02575         "iseq",                 /* 5 */
02576     };
02577     static const char getsetstr[][4] = {
02578         "get",
02579         "set",
02580     };
02581     static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
02582 
02583     VALUE cv;
02584 
02585     CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
02586     if (syms[0] == 0) {
02587         char buff[0x10];
02588         int i;
02589 
02590         for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
02591             int j;
02592             for (j = 0; j < 2; j++) {
02593                 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
02594                 syms[i][j] = ID2SYM(rb_intern(buff));
02595             }
02596         }
02597     }
02598     valstr = syms[reg][isset];
02599 
02600     uh = rb_const_get(rb_cRubyVM, usage_hash);
02601     if ((cv = rb_hash_aref(uh, valstr)) == Qnil) {
02602         cv = INT2FIX(0);
02603     }
02604     HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
02605 }
02606 
#undef HASH_ASET

/* Install the analysis collectors as the live usage hooks; the
 * USAGE_ANALYSIS_*_STOP singleton methods below reset them to 0. */
void (*ruby_vm_collect_usage_func_insn)(int insn) = vm_analysis_insn;
void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = vm_analysis_operand;
void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = vm_analysis_register;
02612 
02613 /* :nodoc: */
02614 static VALUE
02615 usage_analysis_insn_stop(VALUE self)
02616 {
02617     ruby_vm_collect_usage_func_insn = 0;
02618     return Qnil;
02619 }
02620 
02621 /* :nodoc: */
02622 static VALUE
02623 usage_analysis_operand_stop(VALUE self)
02624 {
02625     ruby_vm_collect_usage_func_operand = 0;
02626     return Qnil;
02627 }
02628 
02629 /* :nodoc: */
02630 static VALUE
02631 usage_analysis_register_stop(VALUE self)
02632 {
02633     ruby_vm_collect_usage_func_register = 0;
02634     return Qnil;
02635 }
02636 
02637 #else
02638 
/* Usage collection compiled out: keep NULL hook definitions so the
 * symbols still exist for other translation units. */
void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
02642 
02643 #endif
02644 
02645 #if VM_COLLECT_USAGE_DETAILS
02646 /* @param insn instruction number */
02647 static void
02648 vm_collect_usage_insn(int insn)
02649 {
02650     if (RUBY_DTRACE_INSN_ENABLED()) {
02651         RUBY_DTRACE_INSN(rb_insns_name(insn));
02652     }
02653     if (ruby_vm_collect_usage_func_insn)
02654         (*ruby_vm_collect_usage_func_insn)(insn);
02655 }
02656 
02657 /* @param insn instruction number
02658  * @param n    n-th operand
02659  * @param op   operand value
02660  */
02661 static void
02662 vm_collect_usage_operand(int insn, int n, VALUE op)
02663 {
02664     if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
02665         VALUE valstr;
02666 
02667         valstr = insn_operand_intern(GET_THREAD()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
02668 
02669         RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
02670         RB_GC_GUARD(valstr);
02671     }
02672     if (ruby_vm_collect_usage_func_operand)
02673         (*ruby_vm_collect_usage_func_operand)(insn, n, op);
02674 }
02675 
02676 /* @param reg register id. see code of vm_analysis_register() */
02677 /* @param iseset 0: read, 1: write */
02678 static void
02679 vm_collect_usage_register(int reg, int isset)
02680 {
02681     if (ruby_vm_collect_usage_func_register)
02682         (*ruby_vm_collect_usage_func_register)(reg, isset);
02683 }
02684 #endif
02685 
02686