Ruby 2.0.0-p247 (released 2013-06-27, revision 41674)
00001 /********************************************************************** 00002 00003 cont.c - 00004 00005 $Author: nobu $ 00006 created at: Thu May 23 09:03:43 2007 00007 00008 Copyright (C) 2007 Koichi Sasada 00009 00010 **********************************************************************/ 00011 00012 #include "ruby/ruby.h" 00013 #include "internal.h" 00014 #include "vm_core.h" 00015 #include "gc.h" 00016 #include "eval_intern.h" 00017 00018 #if ((defined(_WIN32) && _WIN32_WINNT >= 0x0400) || (defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT))) && !defined(__NetBSD__) && !defined(__sun) && !defined(__ia64) && !defined(FIBER_USE_NATIVE) 00019 #define FIBER_USE_NATIVE 1 00020 00021 /* FIBER_USE_NATIVE enables Fiber performance improvement using system 00022 * dependent method such as make/setcontext on POSIX system or 00023 * CreateFiber() API on Windows. 00024 * This hack make Fiber context switch faster (x2 or more). 00025 * However, it decrease maximum number of Fiber. For example, on the 00026 * 32bit POSIX OS, ten or twenty thousands Fiber can be created. 00027 * 00028 * Details is reported in the paper "A Fast Fiber Implementation for Ruby 1.9" 00029 * in Proc. of 51th Programming Symposium, pp.21--28 (2010) (in Japanese). 00030 */ 00031 00032 /* On our experience, NetBSD doesn't support using setcontext() and pthread 00033 * simultaneously. This is because pthread_self(), TLS and other information 00034 * are represented by stack pointer (higher bits of stack pointer). 00035 * TODO: check such constraint on configure. 
00036 */ 00037 #elif !defined(FIBER_USE_NATIVE) 00038 #define FIBER_USE_NATIVE 0 00039 #endif 00040 00041 #if FIBER_USE_NATIVE 00042 #ifndef _WIN32 00043 #include <unistd.h> 00044 #include <sys/mman.h> 00045 #include <ucontext.h> 00046 #endif 00047 #define RB_PAGE_SIZE (pagesize) 00048 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1)) 00049 static long pagesize; 00050 #endif /*FIBER_USE_NATIVE*/ 00051 00052 #define CAPTURE_JUST_VALID_VM_STACK 1 00053 00054 enum context_type { 00055 CONTINUATION_CONTEXT = 0, 00056 FIBER_CONTEXT = 1, 00057 ROOT_FIBER_CONTEXT = 2 00058 }; 00059 00060 typedef struct rb_context_struct { 00061 enum context_type type; 00062 VALUE self; 00063 int argc; 00064 VALUE value; 00065 VALUE *vm_stack; 00066 #ifdef CAPTURE_JUST_VALID_VM_STACK 00067 size_t vm_stack_slen; /* length of stack (head of th->stack) */ 00068 size_t vm_stack_clen; /* length of control frames (tail of th->stack) */ 00069 #endif 00070 VALUE *machine_stack; 00071 VALUE *machine_stack_src; 00072 #ifdef __ia64 00073 VALUE *machine_register_stack; 00074 VALUE *machine_register_stack_src; 00075 int machine_register_stack_size; 00076 #endif 00077 rb_thread_t saved_thread; 00078 rb_jmpbuf_t jmpbuf; 00079 size_t machine_stack_size; 00080 } rb_context_t; 00081 00082 enum fiber_status { 00083 CREATED, 00084 RUNNING, 00085 TERMINATED 00086 }; 00087 00088 #if FIBER_USE_NATIVE && !defined(_WIN32) 00089 #define MAX_MAHINE_STACK_CACHE 10 00090 static int machine_stack_cache_index = 0; 00091 typedef struct machine_stack_cache_struct { 00092 void *ptr; 00093 size_t size; 00094 } machine_stack_cache_t; 00095 static machine_stack_cache_t machine_stack_cache[MAX_MAHINE_STACK_CACHE]; 00096 static machine_stack_cache_t terminated_machine_stack; 00097 #endif 00098 00099 typedef struct rb_fiber_struct { 00100 rb_context_t cont; 00101 VALUE prev; 00102 enum fiber_status status; 00103 struct rb_fiber_struct *prev_fiber; 00104 struct rb_fiber_struct *next_fiber; 00105 /* If a fiber invokes "transfer", 00106 * 
then this fiber can't "resume" any more after that. 00107 * You shouldn't mix "transfer" and "resume". 00108 */ 00109 int transfered; 00110 00111 #if FIBER_USE_NATIVE 00112 #ifdef _WIN32 00113 void *fib_handle; 00114 #else 00115 ucontext_t context; 00116 #endif 00117 #endif 00118 } rb_fiber_t; 00119 00120 static const rb_data_type_t cont_data_type, fiber_data_type; 00121 static VALUE rb_cContinuation; 00122 static VALUE rb_cFiber; 00123 static VALUE rb_eFiberError; 00124 00125 #define GetContPtr(obj, ptr) \ 00126 TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr)) 00127 00128 #define GetFiberPtr(obj, ptr) do {\ 00129 TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \ 00130 if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \ 00131 } while (0) 00132 00133 NOINLINE(static VALUE cont_capture(volatile int *stat)); 00134 00135 #define THREAD_MUST_BE_RUNNING(th) do { \ 00136 if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \ 00137 } while (0) 00138 00139 static void 00140 cont_mark(void *ptr) 00141 { 00142 RUBY_MARK_ENTER("cont"); 00143 if (ptr) { 00144 rb_context_t *cont = ptr; 00145 rb_gc_mark(cont->value); 00146 rb_thread_mark(&cont->saved_thread); 00147 rb_gc_mark(cont->saved_thread.self); 00148 00149 if (cont->vm_stack) { 00150 #ifdef CAPTURE_JUST_VALID_VM_STACK 00151 rb_gc_mark_locations(cont->vm_stack, 00152 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen); 00153 #else 00154 rb_gc_mark_localtion(cont->vm_stack, 00155 cont->vm_stack, cont->saved_thread.stack_size); 00156 #endif 00157 } 00158 00159 if (cont->machine_stack) { 00160 if (cont->type == CONTINUATION_CONTEXT) { 00161 /* cont */ 00162 rb_gc_mark_locations(cont->machine_stack, 00163 cont->machine_stack + cont->machine_stack_size); 00164 } 00165 else { 00166 /* fiber */ 00167 rb_thread_t *th; 00168 rb_fiber_t *fib = (rb_fiber_t*)cont; 00169 GetThreadPtr(cont->saved_thread.self, th); 00170 if ((th->fiber != cont->self) && fib->status == 
RUNNING) { 00171 rb_gc_mark_locations(cont->machine_stack, 00172 cont->machine_stack + cont->machine_stack_size); 00173 } 00174 } 00175 } 00176 #ifdef __ia64 00177 if (cont->machine_register_stack) { 00178 rb_gc_mark_locations(cont->machine_register_stack, 00179 cont->machine_register_stack + cont->machine_register_stack_size); 00180 } 00181 #endif 00182 } 00183 RUBY_MARK_LEAVE("cont"); 00184 } 00185 00186 static void 00187 cont_free(void *ptr) 00188 { 00189 RUBY_FREE_ENTER("cont"); 00190 if (ptr) { 00191 rb_context_t *cont = ptr; 00192 RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout); 00193 #if FIBER_USE_NATIVE 00194 if (cont->type == CONTINUATION_CONTEXT) { 00195 /* cont */ 00196 RUBY_FREE_UNLESS_NULL(cont->machine_stack); 00197 } 00198 else { 00199 /* fiber */ 00200 #ifdef _WIN32 00201 if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) { 00202 /* don't delete root fiber handle */ 00203 rb_fiber_t *fib = (rb_fiber_t*)cont; 00204 if (fib->fib_handle) { 00205 DeleteFiber(fib->fib_handle); 00206 } 00207 } 00208 #else /* not WIN32 */ 00209 if (GET_THREAD()->fiber != cont->self) { 00210 rb_fiber_t *fib = (rb_fiber_t*)cont; 00211 if (fib->context.uc_stack.ss_sp) { 00212 if (cont->type == ROOT_FIBER_CONTEXT) { 00213 rb_bug("Illegal root fiber parameter"); 00214 } 00215 munmap((void*)fib->context.uc_stack.ss_sp, fib->context.uc_stack.ss_size); 00216 } 00217 } 00218 else { 00219 /* It may reached here when finalize */ 00220 /* TODO examine whether it is a bug */ 00221 /* rb_bug("cont_free: release self"); */ 00222 } 00223 #endif 00224 } 00225 #else /* not FIBER_USE_NATIVE */ 00226 RUBY_FREE_UNLESS_NULL(cont->machine_stack); 00227 #endif 00228 #ifdef __ia64 00229 RUBY_FREE_UNLESS_NULL(cont->machine_register_stack); 00230 #endif 00231 RUBY_FREE_UNLESS_NULL(cont->vm_stack); 00232 00233 /* free rb_cont_t or rb_fiber_t */ 00234 ruby_xfree(ptr); 00235 } 00236 RUBY_FREE_LEAVE("cont"); 00237 } 00238 00239 static size_t 00240 
cont_memsize(const void *ptr) 00241 { 00242 const rb_context_t *cont = ptr; 00243 size_t size = 0; 00244 if (cont) { 00245 size = sizeof(*cont); 00246 if (cont->vm_stack) { 00247 #ifdef CAPTURE_JUST_VALID_VM_STACK 00248 size_t n = (cont->vm_stack_slen + cont->vm_stack_clen); 00249 #else 00250 size_t n = cont->saved_thread.stack_size; 00251 #endif 00252 size += n * sizeof(*cont->vm_stack); 00253 } 00254 00255 if (cont->machine_stack) { 00256 size += cont->machine_stack_size * sizeof(*cont->machine_stack); 00257 } 00258 #ifdef __ia64 00259 if (cont->machine_register_stack) { 00260 size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack); 00261 } 00262 #endif 00263 } 00264 return size; 00265 } 00266 00267 static void 00268 fiber_mark(void *ptr) 00269 { 00270 RUBY_MARK_ENTER("cont"); 00271 if (ptr) { 00272 rb_fiber_t *fib = ptr; 00273 rb_gc_mark(fib->prev); 00274 cont_mark(&fib->cont); 00275 } 00276 RUBY_MARK_LEAVE("cont"); 00277 } 00278 00279 static void 00280 fiber_link_join(rb_fiber_t *fib) 00281 { 00282 VALUE current_fibval = rb_fiber_current(); 00283 rb_fiber_t *current_fib; 00284 GetFiberPtr(current_fibval, current_fib); 00285 00286 /* join fiber link */ 00287 fib->next_fiber = current_fib->next_fiber; 00288 fib->prev_fiber = current_fib; 00289 current_fib->next_fiber->prev_fiber = fib; 00290 current_fib->next_fiber = fib; 00291 } 00292 00293 static void 00294 fiber_link_remove(rb_fiber_t *fib) 00295 { 00296 fib->prev_fiber->next_fiber = fib->next_fiber; 00297 fib->next_fiber->prev_fiber = fib->prev_fiber; 00298 } 00299 00300 static void 00301 fiber_free(void *ptr) 00302 { 00303 RUBY_FREE_ENTER("fiber"); 00304 if (ptr) { 00305 rb_fiber_t *fib = ptr; 00306 if (fib->cont.type != ROOT_FIBER_CONTEXT && 00307 fib->cont.saved_thread.local_storage) { 00308 st_free_table(fib->cont.saved_thread.local_storage); 00309 } 00310 fiber_link_remove(fib); 00311 00312 cont_free(&fib->cont); 00313 } 00314 RUBY_FREE_LEAVE("fiber"); 00315 } 00316 00317 static 
size_t 00318 fiber_memsize(const void *ptr) 00319 { 00320 const rb_fiber_t *fib = ptr; 00321 size_t size = 0; 00322 if (ptr) { 00323 size = sizeof(*fib); 00324 if (fib->cont.type != ROOT_FIBER_CONTEXT) { 00325 size += st_memsize(fib->cont.saved_thread.local_storage); 00326 } 00327 size += cont_memsize(&fib->cont); 00328 } 00329 return size; 00330 } 00331 00332 VALUE 00333 rb_obj_is_fiber(VALUE obj) 00334 { 00335 if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) { 00336 return Qtrue; 00337 } 00338 else { 00339 return Qfalse; 00340 } 00341 } 00342 00343 static void 00344 cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont) 00345 { 00346 size_t size; 00347 00348 SET_MACHINE_STACK_END(&th->machine_stack_end); 00349 #ifdef __ia64 00350 th->machine_register_stack_end = rb_ia64_bsp(); 00351 #endif 00352 00353 if (th->machine_stack_start > th->machine_stack_end) { 00354 size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end; 00355 cont->machine_stack_src = th->machine_stack_end; 00356 } 00357 else { 00358 size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start; 00359 cont->machine_stack_src = th->machine_stack_start; 00360 } 00361 00362 if (cont->machine_stack) { 00363 REALLOC_N(cont->machine_stack, VALUE, size); 00364 } 00365 else { 00366 cont->machine_stack = ALLOC_N(VALUE, size); 00367 } 00368 00369 FLUSH_REGISTER_WINDOWS; 00370 MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size); 00371 00372 #ifdef __ia64 00373 rb_ia64_flushrs(); 00374 size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start; 00375 cont->machine_register_stack_src = th->machine_register_stack_start; 00376 if (cont->machine_register_stack) { 00377 REALLOC_N(cont->machine_register_stack, VALUE, size); 00378 } 00379 else { 00380 cont->machine_register_stack = ALLOC_N(VALUE, size); 00381 } 00382 00383 MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, 
size); 00384 #endif 00385 } 00386 00387 static const rb_data_type_t cont_data_type = { 00388 "continuation", 00389 {cont_mark, cont_free, cont_memsize,}, 00390 }; 00391 00392 static void 00393 cont_save_thread(rb_context_t *cont, rb_thread_t *th) 00394 { 00395 /* save thread context */ 00396 cont->saved_thread = *th; 00397 /* saved_thread->machine_stack_(start|end) should be NULL */ 00398 /* because it may happen GC afterward */ 00399 cont->saved_thread.machine_stack_start = 0; 00400 cont->saved_thread.machine_stack_end = 0; 00401 #ifdef __ia64 00402 cont->saved_thread.machine_register_stack_start = 0; 00403 cont->saved_thread.machine_register_stack_end = 0; 00404 #endif 00405 } 00406 00407 static void 00408 cont_init(rb_context_t *cont, rb_thread_t *th) 00409 { 00410 /* save thread context */ 00411 cont_save_thread(cont, th); 00412 cont->saved_thread.local_storage = 0; 00413 } 00414 00415 static rb_context_t * 00416 cont_new(VALUE klass) 00417 { 00418 rb_context_t *cont; 00419 volatile VALUE contval; 00420 rb_thread_t *th = GET_THREAD(); 00421 00422 THREAD_MUST_BE_RUNNING(th); 00423 contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont); 00424 cont->self = contval; 00425 cont_init(cont, th); 00426 return cont; 00427 } 00428 00429 static VALUE 00430 cont_capture(volatile int *stat) 00431 { 00432 rb_context_t *cont; 00433 rb_thread_t *th = GET_THREAD(), *sth; 00434 volatile VALUE contval; 00435 00436 THREAD_MUST_BE_RUNNING(th); 00437 rb_vm_stack_to_heap(th); 00438 cont = cont_new(rb_cContinuation); 00439 contval = cont->self; 00440 sth = &cont->saved_thread; 00441 00442 #ifdef CAPTURE_JUST_VALID_VM_STACK 00443 cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack; 00444 cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp; 00445 cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen); 00446 MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen); 00447 MEMCPY(cont->vm_stack + 
cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen); 00448 #else 00449 cont->vm_stack = ALLOC_N(VALUE, th->stack_size); 00450 MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size); 00451 #endif 00452 sth->stack = 0; 00453 00454 cont_save_machine_stack(th, cont); 00455 00456 if (ruby_setjmp(cont->jmpbuf)) { 00457 volatile VALUE value; 00458 00459 value = cont->value; 00460 if (cont->argc == -1) rb_exc_raise(value); 00461 cont->value = Qnil; 00462 *stat = 1; 00463 return value; 00464 } 00465 else { 00466 *stat = 0; 00467 return contval; 00468 } 00469 } 00470 00471 static void 00472 cont_restore_thread(rb_context_t *cont) 00473 { 00474 rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread; 00475 00476 /* restore thread context */ 00477 if (cont->type == CONTINUATION_CONTEXT) { 00478 /* continuation */ 00479 VALUE fib; 00480 00481 th->fiber = sth->fiber; 00482 fib = th->fiber ? th->fiber : th->root_fiber; 00483 00484 if (fib) { 00485 rb_fiber_t *fcont; 00486 GetFiberPtr(fib, fcont); 00487 th->stack_size = fcont->cont.saved_thread.stack_size; 00488 th->stack = fcont->cont.saved_thread.stack; 00489 } 00490 #ifdef CAPTURE_JUST_VALID_VM_STACK 00491 MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen); 00492 MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen, 00493 cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen); 00494 #else 00495 MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size); 00496 #endif 00497 } 00498 else { 00499 /* fiber */ 00500 th->stack = sth->stack; 00501 th->stack_size = sth->stack_size; 00502 th->local_storage = sth->local_storage; 00503 th->fiber = cont->self; 00504 } 00505 00506 th->cfp = sth->cfp; 00507 th->safe_level = sth->safe_level; 00508 th->raised_flag = sth->raised_flag; 00509 th->state = sth->state; 00510 th->status = sth->status; 00511 th->tag = sth->tag; 00512 th->protect_tag = sth->protect_tag; 00513 th->errinfo = sth->errinfo; 00514 th->first_proc = sth->first_proc; 00515 
th->root_lep = sth->root_lep; 00516 th->root_svar = sth->root_svar; 00517 } 00518 00519 #if FIBER_USE_NATIVE 00520 #ifdef _WIN32 00521 static void 00522 fiber_set_stack_location(void) 00523 { 00524 rb_thread_t *th = GET_THREAD(); 00525 VALUE *ptr; 00526 00527 SET_MACHINE_STACK_END(&ptr); 00528 th->machine_stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE)); 00529 } 00530 00531 static VOID CALLBACK 00532 fiber_entry(void *arg) 00533 { 00534 fiber_set_stack_location(); 00535 rb_fiber_start(); 00536 } 00537 #else /* _WIN32 */ 00538 00539 /* 00540 * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL 00541 * if MAP_STACK is passed. 00542 * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755 00543 */ 00544 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) 00545 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK) 00546 #else 00547 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON) 00548 #endif 00549 00550 static char* 00551 fiber_machine_stack_alloc(size_t size) 00552 { 00553 char *ptr; 00554 00555 if (machine_stack_cache_index > 0) { 00556 if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) { 00557 ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr; 00558 machine_stack_cache_index--; 00559 machine_stack_cache[machine_stack_cache_index].ptr = NULL; 00560 machine_stack_cache[machine_stack_cache_index].size = 0; 00561 } 00562 else{ 00563 /* TODO handle multiple machine stack size */ 00564 rb_bug("machine_stack_cache size is not canonicalized"); 00565 } 00566 } 00567 else { 00568 void *page; 00569 STACK_GROW_DIR_DETECTION; 00570 00571 ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0); 00572 if (ptr == MAP_FAILED) { 00573 rb_raise(rb_eFiberError, "can't alloc machine stack to fiber"); 00574 } 00575 00576 /* guard page setup */ 00577 page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0); 00578 if (mprotect(page, 
RB_PAGE_SIZE, PROT_NONE) < 0) { 00579 rb_raise(rb_eFiberError, "mprotect failed"); 00580 } 00581 } 00582 00583 return ptr; 00584 } 00585 #endif 00586 00587 static void 00588 fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size) 00589 { 00590 rb_thread_t *sth = &fib->cont.saved_thread; 00591 00592 #ifdef _WIN32 00593 fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL); 00594 if (!fib->fib_handle) { 00595 /* try to release unnecessary fibers & retry to create */ 00596 rb_gc(); 00597 fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL); 00598 if (!fib->fib_handle) { 00599 rb_raise(rb_eFiberError, "can't create fiber"); 00600 } 00601 } 00602 sth->machine_stack_maxsize = size; 00603 #else /* not WIN32 */ 00604 ucontext_t *context = &fib->context; 00605 char *ptr; 00606 STACK_GROW_DIR_DETECTION; 00607 00608 getcontext(context); 00609 ptr = fiber_machine_stack_alloc(size); 00610 context->uc_link = NULL; 00611 context->uc_stack.ss_sp = ptr; 00612 context->uc_stack.ss_size = size; 00613 makecontext(context, rb_fiber_start, 0); 00614 sth->machine_stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size)); 00615 sth->machine_stack_maxsize = size - RB_PAGE_SIZE; 00616 #endif 00617 #ifdef __ia64 00618 sth->machine_register_stack_maxsize = sth->machine_stack_maxsize; 00619 #endif 00620 } 00621 00622 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)); 00623 00624 static void 00625 fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib) 00626 { 00627 rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread; 00628 00629 if (newfib->status != RUNNING) { 00630 fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size); 00631 } 00632 00633 /* restore thread context */ 00634 cont_restore_thread(&newfib->cont); 00635 th->machine_stack_maxsize = sth->machine_stack_maxsize; 00636 if (sth->machine_stack_end && (newfib != oldfib)) { 00637 rb_bug("fiber_setcontext: 
sth->machine_stack_end has non zero value"); 00638 } 00639 00640 /* save oldfib's machine stack */ 00641 if (oldfib->status != TERMINATED) { 00642 STACK_GROW_DIR_DETECTION; 00643 SET_MACHINE_STACK_END(&th->machine_stack_end); 00644 if (STACK_DIR_UPPER(0, 1)) { 00645 oldfib->cont.machine_stack_size = th->machine_stack_start - th->machine_stack_end; 00646 oldfib->cont.machine_stack = th->machine_stack_end; 00647 } 00648 else { 00649 oldfib->cont.machine_stack_size = th->machine_stack_end - th->machine_stack_start; 00650 oldfib->cont.machine_stack = th->machine_stack_start; 00651 } 00652 } 00653 /* exchange machine_stack_start between oldfib and newfib */ 00654 oldfib->cont.saved_thread.machine_stack_start = th->machine_stack_start; 00655 th->machine_stack_start = sth->machine_stack_start; 00656 /* oldfib->machine_stack_end should be NULL */ 00657 oldfib->cont.saved_thread.machine_stack_end = 0; 00658 #ifndef _WIN32 00659 if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) { 00660 rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL"); 00661 } 00662 #endif 00663 00664 /* swap machine context */ 00665 #ifdef _WIN32 00666 SwitchToFiber(newfib->fib_handle); 00667 #else 00668 swapcontext(&oldfib->context, &newfib->context); 00669 #endif 00670 } 00671 #endif 00672 00673 NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *))); 00674 00675 static void 00676 cont_restore_1(rb_context_t *cont) 00677 { 00678 cont_restore_thread(cont); 00679 00680 /* restore machine stack */ 00681 #ifdef _M_AMD64 00682 { 00683 /* workaround for x64 SEH */ 00684 jmp_buf buf; 00685 setjmp(buf); 00686 ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame = 00687 ((_JUMP_BUFFER*)(&buf))->Frame; 00688 } 00689 #endif 00690 if (cont->machine_stack_src) { 00691 FLUSH_REGISTER_WINDOWS; 00692 MEMCPY(cont->machine_stack_src, cont->machine_stack, 00693 VALUE, cont->machine_stack_size); 00694 } 00695 00696 #ifdef __ia64 00697 if (cont->machine_register_stack_src) { 00698 
MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack, 00699 VALUE, cont->machine_register_stack_size); 00700 } 00701 #endif 00702 00703 ruby_longjmp(cont->jmpbuf, 1); 00704 } 00705 00706 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *))); 00707 00708 #ifdef __ia64 00709 #define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4 00710 #define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4 00711 static volatile int C(a), C(b), C(c), C(d), C(e); 00712 static volatile int C(f), C(g), C(h), C(i), C(j); 00713 static volatile int C(k), C(l), C(m), C(n), C(o); 00714 static volatile int C(p), C(q), C(r), C(s), C(t); 00715 #if 0 00716 {/* the above lines make cc-mode.el confused so much */} 00717 #endif 00718 int rb_dummy_false = 0; 00719 NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *))); 00720 static void 00721 register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp) 00722 { 00723 if (rb_dummy_false) { 00724 /* use registers as much as possible */ 00725 E(a) = E(b) = E(c) = E(d) = E(e) = 00726 E(f) = E(g) = E(h) = E(i) = E(j) = 00727 E(k) = E(l) = E(m) = E(n) = E(o) = 00728 E(p) = E(q) = E(r) = E(s) = E(t) = 0; 00729 E(a) = E(b) = E(c) = E(d) = E(e) = 00730 E(f) = E(g) = E(h) = E(i) = E(j) = 00731 E(k) = E(l) = E(m) = E(n) = E(o) = 00732 E(p) = E(q) = E(r) = E(s) = E(t) = 0; 00733 } 00734 if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) { 00735 register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp()); 00736 } 00737 cont_restore_0(cont, vp); 00738 } 00739 #undef C 00740 #undef E 00741 #endif 00742 00743 static void 00744 cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame) 00745 { 00746 if (cont->machine_stack_src) { 00747 #ifdef HAVE_ALLOCA 00748 #define STACK_PAD_SIZE 1 00749 #else 00750 #define STACK_PAD_SIZE 1024 00751 #endif 00752 VALUE space[STACK_PAD_SIZE]; 00753 00754 #if !STACK_GROW_DIRECTION 00755 if 
(addr_in_prev_frame > &space[0]) { 00756 /* Stack grows downward */ 00757 #endif 00758 #if STACK_GROW_DIRECTION <= 0 00759 volatile VALUE *const end = cont->machine_stack_src; 00760 if (&space[0] > end) { 00761 # ifdef HAVE_ALLOCA 00762 volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end); 00763 space[0] = *sp; 00764 # else 00765 cont_restore_0(cont, &space[0]); 00766 # endif 00767 } 00768 #endif 00769 #if !STACK_GROW_DIRECTION 00770 } 00771 else { 00772 /* Stack grows upward */ 00773 #endif 00774 #if STACK_GROW_DIRECTION >= 0 00775 volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size; 00776 if (&space[STACK_PAD_SIZE] < end) { 00777 # ifdef HAVE_ALLOCA 00778 volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]); 00779 space[0] = *sp; 00780 # else 00781 cont_restore_0(cont, &space[STACK_PAD_SIZE-1]); 00782 # endif 00783 } 00784 #endif 00785 #if !STACK_GROW_DIRECTION 00786 } 00787 #endif 00788 } 00789 cont_restore_1(cont); 00790 } 00791 #ifdef __ia64 00792 #define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp()) 00793 #endif 00794 00795 /* 00796 * Document-class: Continuation 00797 * 00798 * Continuation objects are generated by Kernel#callcc, 00799 * after having +require+d <i>continuation</i>. They hold 00800 * a return address and execution context, allowing a nonlocal return 00801 * to the end of the <code>callcc</code> block from anywhere within a 00802 * program. Continuations are somewhat analogous to a structured 00803 * version of C's <code>setjmp/longjmp</code> (although they contain 00804 * more state, so you might consider them closer to threads). 
00805 * 00806 * For instance: 00807 * 00808 * require "continuation" 00809 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ] 00810 * callcc{|cc| $cc = cc} 00811 * puts(message = arr.shift) 00812 * $cc.call unless message =~ /Max/ 00813 * 00814 * <em>produces:</em> 00815 * 00816 * Freddie 00817 * Herbie 00818 * Ron 00819 * Max 00820 * 00821 * This (somewhat contrived) example allows the inner loop to abandon 00822 * processing early: 00823 * 00824 * require "continuation" 00825 * callcc {|cont| 00826 * for i in 0..4 00827 * print "\n#{i}: " 00828 * for j in i*5...(i+1)*5 00829 * cont.call() if j == 17 00830 * printf "%3d", j 00831 * end 00832 * end 00833 * } 00834 * puts 00835 * 00836 * <em>produces:</em> 00837 * 00838 * 0: 0 1 2 3 4 00839 * 1: 5 6 7 8 9 00840 * 2: 10 11 12 13 14 00841 * 3: 15 16 00842 */ 00843 00844 /* 00845 * call-seq: 00846 * callcc {|cont| block } -> obj 00847 * 00848 * Generates a Continuation object, which it passes to 00849 * the associated block. You need to <code>require 00850 * 'continuation'</code> before using this method. Performing a 00851 * <em>cont</em><code>.call</code> will cause the #callcc 00852 * to return (as will falling through the end of the block). The 00853 * value returned by the #callcc is the value of the 00854 * block, or the value passed to <em>cont</em><code>.call</code>. See 00855 * class Continuation for more details. Also see 00856 * Kernel#throw for an alternative mechanism for 00857 * unwinding a call stack. 
00858 */ 00859 00860 static VALUE 00861 rb_callcc(VALUE self) 00862 { 00863 volatile int called; 00864 volatile VALUE val = cont_capture(&called); 00865 00866 if (called) { 00867 return val; 00868 } 00869 else { 00870 return rb_yield(val); 00871 } 00872 } 00873 00874 static VALUE 00875 make_passing_arg(int argc, VALUE *argv) 00876 { 00877 switch (argc) { 00878 case 0: 00879 return Qnil; 00880 case 1: 00881 return argv[0]; 00882 default: 00883 return rb_ary_new4(argc, argv); 00884 } 00885 } 00886 00887 /* 00888 * call-seq: 00889 * cont.call(args, ...) 00890 * cont[args, ...] 00891 * 00892 * Invokes the continuation. The program continues from the end of the 00893 * <code>callcc</code> block. If no arguments are given, the original 00894 * <code>callcc</code> returns <code>nil</code>. If one argument is 00895 * given, <code>callcc</code> returns it. Otherwise, an array 00896 * containing <i>args</i> is returned. 00897 * 00898 * callcc {|cont| cont.call } #=> nil 00899 * callcc {|cont| cont.call 1 } #=> 1 00900 * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3] 00901 */ 00902 00903 static VALUE 00904 rb_cont_call(int argc, VALUE *argv, VALUE contval) 00905 { 00906 rb_context_t *cont; 00907 rb_thread_t *th = GET_THREAD(); 00908 GetContPtr(contval, cont); 00909 00910 if (cont->saved_thread.self != th->self) { 00911 rb_raise(rb_eRuntimeError, "continuation called across threads"); 00912 } 00913 if (cont->saved_thread.protect_tag != th->protect_tag) { 00914 rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier"); 00915 } 00916 if (cont->saved_thread.fiber) { 00917 rb_fiber_t *fcont; 00918 GetFiberPtr(cont->saved_thread.fiber, fcont); 00919 00920 if (th->fiber != cont->saved_thread.fiber) { 00921 rb_raise(rb_eRuntimeError, "continuation called across fiber"); 00922 } 00923 } 00924 00925 cont->argc = argc; 00926 cont->value = make_passing_arg(argc, argv); 00927 00928 /* restore `tracing' context. 
see [Feature #4347] */ 00929 th->trace_arg = cont->saved_thread.trace_arg; 00930 00931 cont_restore_0(cont, &contval); 00932 return Qnil; /* unreachable */ 00933 } 00934 00935 /*********/ 00936 /* fiber */ 00937 /*********/ 00938 00939 /* 00940 * Document-class: Fiber 00941 * 00942 * Fibers are primitives for implementing light weight cooperative 00943 * concurrency in Ruby. Basically they are a means of creating code blocks 00944 * that can be paused and resumed, much like threads. The main difference 00945 * is that they are never preempted and that the scheduling must be done by 00946 * the programmer and not the VM. 00947 * 00948 * As opposed to other stackless light weight concurrency models, each fiber 00949 * comes with a small 4KB stack. This enables the fiber to be paused from deeply 00950 * nested function calls within the fiber block. 00951 * 00952 * When a fiber is created it will not run automatically. Rather it must be 00953 * be explicitly asked to run using the <code>Fiber#resume</code> method. 00954 * The code running inside the fiber can give up control by calling 00955 * <code>Fiber.yield</code> in which case it yields control back to caller 00956 * (the caller of the <code>Fiber#resume</code>). 00957 * 00958 * Upon yielding or termination the Fiber returns the value of the last 00959 * executed expression 00960 * 00961 * For instance: 00962 * 00963 * fiber = Fiber.new do 00964 * Fiber.yield 1 00965 * 2 00966 * end 00967 * 00968 * puts fiber.resume 00969 * puts fiber.resume 00970 * puts fiber.resume 00971 * 00972 * <em>produces</em> 00973 * 00974 * 1 00975 * 2 00976 * FiberError: dead fiber called 00977 * 00978 * The <code>Fiber#resume</code> method accepts an arbitrary number of 00979 * parameters, if it is the first call to <code>resume</code> then they 00980 * will be passed as block arguments. 
Otherwise they will be the return
 *  value of the call to <code>Fiber.yield</code>
 *
 *  Example:
 *
 *    fiber = Fiber.new do |first|
 *      second = Fiber.yield first + 2
 *    end
 *
 *    puts fiber.resume 10
 *    puts fiber.resume 14
 *    puts fiber.resume 18
 *
 *  <em>produces</em>
 *
 *    12
 *    14
 *    FiberError: dead fiber called
 *
 */

/* TypedData hooks for Fiber objects; mark/free/memsize are defined earlier
 * in this file. */
static const rb_data_type_t fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize,},
};

/* Allocate an empty Fiber wrapper; the rb_fiber_t payload is attached
 * later by fiber_t_alloc() (DATA_PTR starts as 0). */
static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}

/* Allocate and zero the rb_fiber_t for +fibval+ and link it to the wrapper.
 * Raises RuntimeError if the wrapper already has a payload (i.e. Fiber#initialize
 * was called twice on the same object). */
static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    if (DATA_PTR(fibval) != 0) {
	rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    THREAD_MUST_BE_RUNNING(th);
    fib = ALLOC(rb_fiber_t);
    memset(fib, 0, sizeof(rb_fiber_t));
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    /* cont_init copies the current thread state into fib->cont.saved_thread
     * (defined earlier in this file). */
    cont_init(&fib->cont, th);
    fib->prev = Qnil;
    fib->status = CREATED;

    DATA_PTR(fibval) = fib;

    return fib;
}

/* Set up a fresh VM stack and an initial (dummy) control frame for the new
 * fiber so that rb_fiber_start() can invoke +proc+ on it later.
 * Returns +fibval+. */
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* initialize cont */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    /* insert this fiber into the circular prev_fiber/next_fiber list
     * (fiber_link_join is defined earlier in this file). */
    fiber_link_join(fib);

    th->stack_size = th->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
    th->stack = ALLOC_N(VALUE, th->stack_size);

    /* Build one empty control frame at the top of the new VM stack.  The
     * frame fields are zeroed/Qnil so the VM sees a clean dummy frame. */
    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
#if VM_DEBUG_BP_CHECK
    th->cfp->bp_check = 0;
#endif
    th->cfp->ep = th->stack;
    /* no block is attached to the dummy frame's environment */
    *th->cfp->ep = VM_ENVVAL_BLOCK_PTR(0);
    th->cfp->self = Qnil;
    th->cfp->klass = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    /* the proc to run on first resume; consumed by rb_fiber_start() */
    th->first_proc = proc;

#if !FIBER_USE_NATIVE
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}

/* :nodoc: */
static VALUE
rb_fiber_init(VALUE fibval)
{
    return fiber_init(fibval, rb_block_proc());
}

/* C API: create a Fiber that runs +func+ with +obj+ (wrapped as a proc). */
VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
    return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
}

/* Pick the fiber to return control to on Fiber.yield / termination:
 * the current fiber's resumer (+prev+) if any, otherwise the root fiber.
 * Raises FiberError when yielding from the root fiber itself.
 * Note: +prev+ is cleared here, so the link is one-shot. */
static VALUE
return_fiber(void)
{
    rb_fiber_t *fib;
    VALUE curr = rb_fiber_current();
    VALUE prev;
    GetFiberPtr(curr, fib);

    prev = fib->prev;
    if (NIL_P(prev)) {
	const VALUE root_fiber = GET_THREAD()->root_fiber;

	if (root_fiber == curr) {
	    rb_raise(rb_eFiberError, "can't yield from root fiber");
	}
	return root_fiber;
    }
    else {
	fib->prev = Qnil;
	return prev;
    }
}

VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);

/* Mark +fib+ as TERMINATED, stash its machine stack for recycling (native,
 * non-Windows case), and switch back to the return fiber, passing the
 * fiber block's final value along.  Never returns to the caller. */
static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
#if FIBER_USE_NATIVE && !defined(_WIN32)
    /* Ruby must not switch to other thread until storing terminated_machine_stack */
    terminated_machine_stack.ptr = fib->context.uc_stack.ss_sp;
    terminated_machine_stack.size = fib->context.uc_stack.ss_size / sizeof(VALUE);
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine_stack = NULL;
    fib->cont.machine_stack_size = 0;
#endif
    rb_fiber_transfer(return_fiber(), 1, &value);
}

/* Entry point executed on the fiber's own stack/context.  Runs the stored
 * proc, forwards any uncaught exception to the resuming thread as a pending
 * interrupt, then terminates the fiber.  Must never fall through. */
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
	int argc;
	VALUE *argv, args;
	GetProcPtr(cont->saved_thread.first_proc, proc);
	args = cont->value;
	/* a single argument is passed directly; for argc > 1, cont->value
	 * holds the packed arguments (presumably an Array — set by
	 * make_passing_arg; verify against fiber_switch's caller). */
	argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
	cont->value = Qnil;
	th->errinfo = Qnil;
	th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
	th->root_svar = Qnil;

	fib->status = RUNNING;
	cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
	/* the proc left via a non-local jump: re-deliver it to the thread
	 * that resumes next, as a pending interrupt */
	if (state == TAG_RAISE || state == TAG_FATAL) {
	    rb_threadptr_pending_interrupt_enque(th, th->errinfo);
	}
	else {
	    VALUE err = rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
	    if (!NIL_P(err))
		rb_threadptr_pending_interrupt_enque(th, err);
	}
	RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}

/* Lazily wrap the thread's original context as a ROOT_FIBER_CONTEXT fiber
 * so the running code can be switched away from and back to. */
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;
    /* no need to allocate vm stack */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    fib->fib_handle = ConvertThreadToFiber(0);
#endif
#endif
    fib->status = RUNNING;
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}

/* C API: return the currently running fiber, creating the root fiber on
 * first use. */
VALUE
rb_fiber_current(void)
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
	/* save root */
	rb_fiber_t *fib = root_fiber_alloc(th);
	th->root_fiber = th->fiber = fib->cont.self;
    }
    return th->fiber;
}

/* Save the current fiber's state and switch execution to +next_fib+.
 * Returns (in the ORIGINAL fiber, once it is resumed later) the value
 * passed back in, re-raising if the incoming argc is the -1 error marker.
 * In the non-native build, returns Qundef on the setjmp "save" leg so the
 * caller can complete the switch via cont_restore_0(). */
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
	GetFiberPtr(th->fiber, fib);
	cont_save_thread(&fib->cont, th);
    }
    else {
	/* create current fiber */
	fib = root_fiber_alloc(th);
	th->root_fiber = th->fiber = fib->cont.self;
    }

#if !FIBER_USE_NATIVE
    cont_save_machine_stack(th, &fib->cont);
#endif

    /* Native build: always take this branch and switch via setcontext.
     * Non-native: ruby_setjmp() returns 0 on the save leg (we fall through
     * to Qundef below) and non-zero when the fiber is later resumed. */
    if (FIBER_USE_NATIVE || ruby_setjmp(fib->cont.jmpbuf)) {
#if FIBER_USE_NATIVE
	fiber_setcontext(next_fib, fib);
#ifndef _WIN32
	/* We just came back from some fiber; if one terminated meanwhile,
	 * recycle its machine stack into the cache or unmap it. */
	if (terminated_machine_stack.ptr) {
	    if (machine_stack_cache_index < MAX_MAHINE_STACK_CACHE) {
		machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
		machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
		machine_stack_cache_index++;
	    }
	    else {
		if (terminated_machine_stack.ptr != fib->cont.machine_stack) {
		    munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
		}
		else {
		    rb_bug("terminated fiber resumed");
		}
	    }
	    terminated_machine_stack.ptr = NULL;
	    terminated_machine_stack.size = 0;
	}
#endif
#endif
	/* restored */
	GetFiberPtr(th->fiber, fib);
	/* argc == -1 marks an exception handed over by fiber_switch() */
	if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
	return fib->cont.value;
    }
#if !FIBER_USE_NATIVE
    else {
	return Qundef;
    }
#endif
}

/* Core switch used by resume/transfer/yield.  Validates the destination,
 * records resume linkage, stores the passed arguments in the destination's
 * context, and performs the actual switch via fiber_store().
 * +is_resume+ distinguishes Fiber#resume (sets fib->prev) from transfer. */
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (th->fiber == fibval) {
	/* ignore fiber context switch
	 * because destination fiber is same as current fiber
	 */
	return make_passing_arg(argc, argv);
    }

    if (cont->saved_thread.self != th->self) {
	rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
	rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
	/* The target is dead: deliver a FiberError into a fiber that can
	 * still raise it (the current one, or failing that the root). */
	value = rb_exc_new2(rb_eFiberError, "dead fiber called");
	if (th->fiber != fibval) {
	    GetFiberPtr(th->fiber, fib);
	    if (fib->status != TERMINATED) rb_exc_raise(value);
	    fibval = th->root_fiber;
	}
	else {
	    /* NOTE(review): th->fiber == fibval was already handled by the
	     * early return above, so this branch looks unreachable —
	     * apparently defensive; confirm before relying on it. */
	    fibval = fib->prev;
	    if (NIL_P(fibval)) fibval = th->root_fiber;
	}
	GetFiberPtr(fibval, fib);
	cont = &fib->cont;
	/* argc == -1 tells fiber_store() to raise cont->value on arrival */
	cont->argc = -1;
	cont->value = value;
#if FIBER_USE_NATIVE
	{
	    VALUE oldfibval;
	    rb_fiber_t *oldfib;
	    oldfibval = rb_fiber_current();
	    GetFiberPtr(oldfibval, oldfib);
	    fiber_setcontext(fib, oldfib);
	}
#else
	cont_restore_0(cont, &value);
#endif
    }

    if (is_resume) {
	fib->prev = rb_fiber_current();
    }
    else {
	/* restore `tracing' context. see [Feature #4347] */
	th->trace_arg = cont->saved_thread.trace_arg;
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    value = fiber_store(fib);
#if !FIBER_USE_NATIVE
    /* Qundef means fiber_store() only saved state (setjmp leg); finish the
     * switch by restoring the destination context — never returns. */
    if (value == Qundef) {
	cont_restore_0(cont, &value);
	rb_bug("rb_fiber_resume: unreachable");
    }
#endif
    RUBY_VM_CHECK_INTS(th);

    return value;
}

/* C API: Fiber#transfer semantics (no prev linkage). */
VALUE
rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
{
    return fiber_switch(fib, argc, argv, 0);
}

/* C API: Fiber#resume semantics; rejects double resume and resuming a
 * fiber that has already been used with #transfer. */
VALUE
rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);

    if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
	rb_raise(rb_eFiberError, "double resume");
    }
    if (fib->transfered != 0) {
	rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
    }

    return fiber_switch(fibval, argc, argv, 1);
}

/* C API: Fiber.yield — transfer back to the resuming fiber. */
VALUE
rb_fiber_yield(int argc, VALUE *argv)
{
    return rb_fiber_transfer(return_fiber(), argc, argv);
}

/* Point th->local_storage back at the root fiber's saved table when the
 * thread dies while a non-root fiber is current (called from thread code). */
void
rb_fiber_reset_root_local_storage(VALUE thval)
{
    rb_thread_t *th;
    rb_fiber_t *fib;

    GetThreadPtr(thval, th);
    if (th->root_fiber && th->root_fiber != th->fiber) {
	GetFiberPtr(th->root_fiber, fib);
	th->local_storage = fib->cont.saved_thread.local_storage;
    }
}

/*
 *  call-seq:
 *     fiber.alive? -> true or false
 *
 *  Returns true if the fiber can still be resumed (or transferred
 *  to). After finishing execution of the fiber block this method will
 *  always return false. You need to <code>require 'fiber'</code>
 *  before using this method.
 */
VALUE
rb_fiber_alive_p(VALUE fibval)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    return fib->status != TERMINATED ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *     fiber.resume(args, ...) -> obj
 *
 *  Resumes the fiber from the point at which the last <code>Fiber.yield</code>
 *  was called, or starts running it if it is the first call to
 *  <code>resume</code>. Arguments passed to resume will be the value of
 *  the <code>Fiber.yield</code> expression or will be passed as block
 *  parameters to the fiber's block if this is the first <code>resume</code>.
 *
 *  Alternatively, when resume is called it evaluates to the arguments passed
 *  to the next <code>Fiber.yield</code> statement inside the fiber's block
 *  or to the block value if it runs to completion without any
 *  <code>Fiber.yield</code>
 */
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_resume(fib, argc, argv);
}

/*
 *  call-seq:
 *     fiber.transfer(args, ...) -> obj
 *
 *  Transfer control to another fiber, resuming it from where it last
 *  stopped or starting it if it was not resumed before. The calling
 *  fiber will be suspended much like in a call to
 *  <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
 *  before using this method.
 *
 *  The fiber which receives the transfer call treats it much like
 *  a resume call. Arguments passed to transfer are treated like those
 *  passed to resume.
 *
 *  You cannot resume a fiber that transferred control to another one.
 *  This will cause a double resume error. You need to transfer control
 *  back to this fiber before it can yield and resume.
 *
 *  Example:
 *
 *    fiber1 = Fiber.new do
 *      puts "In Fiber 1"
 *      Fiber.yield
 *    end
 *
 *    fiber2 = Fiber.new do
 *      puts "In Fiber 2"
 *      fiber1.transfer
 *      puts "Never see this message"
 *    end
 *
 *    fiber3 = Fiber.new do
 *      puts "In Fiber 3"
 *    end
 *
 *    fiber2.resume
 *    fiber3.resume
 *
 *  <em>produces</em>
 *
 *    In Fiber 2
 *    In Fiber 1
 *    In Fiber 3
 *
 */
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    /* once transferred, the fiber may no longer be #resume'd */
    fib->transfered = 1;
    return rb_fiber_transfer(fibval, argc, argv);
}

/*
 *  call-seq:
 *     Fiber.yield(args, ...) -> obj
 *
 *  Yields control back to the context that resumed the fiber, passing
 *  along any arguments that were passed to it. The fiber will resume
 *  processing at this point when <code>resume</code> is called next.
 *  Any arguments passed to the next <code>resume</code> will be the
 *  value that this <code>Fiber.yield</code> expression evaluates to.
 */
static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield(argc, argv);
}

/*
 *  call-seq:
 *     Fiber.current() -> fiber
 *
 *  Returns the current fiber. You need to <code>require 'fiber'</code>
 *  before using this method. If you are not running in the context of
 *  a fiber this method will return the root fiber.
 */
static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}



/*
 *  Document-class: FiberError
 *
 *  Raised when an invalid operation is attempted on a Fiber, in
 *  particular when attempting to call/resume a dead fiber,
 *  attempting to yield from the root fiber, or calling a fiber across
 *  threads.
 *
 *     fiber = Fiber.new{}
 *     fiber.resume #=> nil
 *     fiber.resume #=> FiberError: dead fiber called
 */

/* Register the Fiber class and its core methods; also determines the page
 * size needed by the native-fiber stack allocator. */
void
Init_Cont(void)
{
#if FIBER_USE_NATIVE
    rb_thread_t *th = GET_THREAD();

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else /* not WIN32 */
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->machine_stack_end);
#endif

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}

#if defined __GNUC__ && __GNUC__ >= 4
#pragma GCC visibility push(default)
#endif

/* Register the Continuation class (loaded by `require 'continuation'`). */
void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}

/* Register the extra Fiber methods exposed by `require 'fiber'`. */
void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}

#if defined __GNUC__ && __GNUC__ >= 4
#pragma GCC visibility pop
#endif