#include "ruby/ruby.h"
#include "vm_core.h"
#include "gc.h"
#include "eval_intern.h"

#define CAPTURE_JUST_VALID_VM_STACK 1
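/*
 * When CAPTURE_JUST_VALID_VM_STACK is defined, cont_capture() copies only
 * the live portion of the VM stack instead of the whole th->stack area:
 * vm_stack_slen covers the value stack from th->stack up to th->cfp->sp
 * (plus th->mark_stack_len), and vm_stack_clen covers the control frames
 * from th->cfp up to the top of the stack.  cont_restore_1() copies the
 * two pieces back to the same positions, so the unused hole between them
 * is never saved or restored.
 */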

enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};

typedef struct rb_context_struct {
    enum context_type type;
    VALUE self;
    int argc;
    VALUE value;
    VALUE *vm_stack;
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;
    size_t vm_stack_clen;
#endif
    VALUE *machine_stack;
    VALUE *machine_stack_src;
#ifdef __ia64
    VALUE *machine_register_stack;
    VALUE *machine_register_stack_src;
    int machine_register_stack_size;
#endif
    rb_thread_t saved_thread;
    rb_jmpbuf_t jmpbuf;
    size_t machine_stack_size;
} rb_context_t;

enum fiber_status {
    CREATED,
    RUNNING,
    TERMINATED
};

typedef struct rb_fiber_struct {
    rb_context_t cont;
    VALUE prev;
    enum fiber_status status;
    struct rb_fiber_struct *prev_fiber;
    struct rb_fiber_struct *next_fiber;
} rb_fiber_t;

static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

#define GetContPtr(obj, ptr) \
    TypedData_Get_Struct(obj, rb_context_t, &cont_data_type, ptr)

#define GetFiberPtr(obj, ptr) do {\
    TypedData_Get_Struct(obj, rb_fiber_t, &fiber_data_type, ptr); \
    if (!ptr) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while(0)

NOINLINE(static VALUE cont_capture(volatile int *stat));

void rb_thread_mark(rb_thread_t *th);
#define THREAD_MUST_BE_RUNNING(th) do { \
    if (!th->tag) rb_raise(rb_eThreadError, "not running thread"); \
} while (0)

static void
cont_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        rb_gc_mark(cont->value);
        rb_thread_mark(&cont->saved_thread);

        if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
            rb_gc_mark_locations(cont->vm_stack,
                                 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
#else
            rb_gc_mark_locations(cont->vm_stack,
                                 cont->vm_stack + cont->saved_thread.stack_size);
#endif
        }

        if (cont->machine_stack) {
            rb_gc_mark_locations(cont->machine_stack,
                                 cont->machine_stack + cont->machine_stack_size);
        }
#ifdef __ia64
        if (cont->machine_register_stack) {
            rb_gc_mark_locations(cont->machine_register_stack,
                                 cont->machine_register_stack + cont->machine_register_stack_size);
        }
#endif
    }
    RUBY_MARK_LEAVE("cont");
}

static void
cont_free(void *ptr)
{
    RUBY_FREE_ENTER("cont");
    if (ptr) {
        rb_context_t *cont = ptr;
        RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack);
        RUBY_FREE_UNLESS_NULL(cont->machine_stack);
#ifdef __ia64
        RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
#endif
        RUBY_FREE_UNLESS_NULL(cont->vm_stack);

        /* free rb_context_t itself (or the enclosing rb_fiber_t, see fiber_free) */
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("cont");
}

static size_t
cont_memsize(const void *ptr)
{
    const rb_context_t *cont = ptr;
    size_t size = 0;
    if (cont) {
        size = sizeof(*cont);
        if (cont->vm_stack) {
#ifdef CAPTURE_JUST_VALID_VM_STACK
            size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
#else
            size_t n = cont->saved_thread.stack_size;
#endif
            size += n * sizeof(*cont->vm_stack);
        }

        if (cont->machine_stack) {
            size += cont->machine_stack_size * sizeof(*cont->machine_stack);
        }
#ifdef __ia64
        if (cont->machine_register_stack) {
            size += cont->machine_register_stack_size * sizeof(*cont->machine_register_stack);
        }
#endif
    }
    return size;
}

static void
fiber_mark(void *ptr)
{
    RUBY_MARK_ENTER("cont");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        rb_gc_mark(fib->prev);
        cont_mark(&fib->cont);
    }
    RUBY_MARK_LEAVE("cont");
}

static void
fiber_link_join(rb_fiber_t *fib)
{
    VALUE current_fibval = rb_fiber_current();
    rb_fiber_t *current_fib;
    GetFiberPtr(current_fibval, current_fib);

    /* insert fib into the circular fiber list, right after the current fiber */
    fib->next_fiber = current_fib->next_fiber;
    fib->prev_fiber = current_fib;
    current_fib->next_fiber->prev_fiber = fib;
    current_fib->next_fiber = fib;
}

static void
fiber_link_remove(rb_fiber_t *fib)
{
    fib->prev_fiber->next_fiber = fib->next_fiber;
    fib->next_fiber->prev_fiber = fib->prev_fiber;
}

static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
        rb_fiber_t *fib = ptr;

        if (fib->cont.type != ROOT_FIBER_CONTEXT &&
            fib->cont.saved_thread.local_storage) {
            st_free_table(fib->cont.saved_thread.local_storage);
        }
        fiber_link_remove(fib);

        cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}

static size_t
fiber_memsize(const void *ptr)
{
    const rb_fiber_t *fib = ptr;
    size_t size = 0;
    if (ptr) {
        size = sizeof(*fib);
        if (fib->cont.type != ROOT_FIBER_CONTEXT) {
            size += st_memsize(fib->cont.saved_thread.local_storage);
        }
        size += cont_memsize(&fib->cont);
    }
    return size;
}

static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;
    rb_thread_t *sth = &cont->saved_thread;

    SET_MACHINE_STACK_END(&th->machine_stack_end);
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif

    /* handle both stack growth directions */
    if (th->machine_stack_start > th->machine_stack_end) {
        size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
        cont->machine_stack_src = th->machine_stack_end;
    }
    else {
        size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
        cont->machine_stack_src = th->machine_stack_start;
    }

    if (cont->machine_stack) {
        REALLOC_N(cont->machine_stack, VALUE, size);
    }
    else {
        cont->machine_stack = ALLOC_N(VALUE, size);
    }

    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
    cont->machine_register_stack_src = th->machine_register_stack_start;
    if (cont->machine_register_stack) {
        REALLOC_N(cont->machine_register_stack, VALUE, size);
    }
    else {
        cont->machine_register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
#endif

    /* the copy now lives in cont->machine_stack; drop the saved thread's own range
     * so GC does not scan the live machine stack through saved_thread */
    sth->machine_stack_start = sth->machine_stack_end = 0;
#ifdef __ia64
    sth->machine_register_stack_start = sth->machine_register_stack_end = 0;
#endif
}

static const rb_data_type_t cont_data_type = {
    "continuation",
    cont_mark, cont_free, cont_memsize,
};

static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    /* save the current thread context */
    cont->saved_thread = *th;
    cont->saved_thread.local_storage = 0;
}

static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}

void rb_vm_stack_to_heap(rb_thread_t *th);

static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    if (ruby_setjmp(cont->jmpbuf)) {
        /* came back here via ruby_longjmp() in cont_restore_1() */
        VALUE value;

        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value);
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        /* initial capture */
        *stat = 0;
        return cont->self;
    }
}

NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

static void
cont_restore_1(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    /* restore the VM stack */
    if (cont->type == CONTINUATION_CONTEXT) {
        /* continuation: copy the captured stack back onto the stack of the
         * fiber that captured it */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            rb_fiber_t *fcont;
            GetFiberPtr(fib, fcont);
            th->stack_size = fcont->cont.saved_thread.stack_size;
            th->stack = fcont->cont.saved_thread.stack;
        }
#ifdef CAPTURE_JUST_VALID_VM_STACK
        MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
        MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
               cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
        /* fiber: simply switch to the fiber's own stack */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    /* restore the rest of the thread context */
    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;

    /* restore the machine stack */
#ifdef _M_AMD64
    {
        /* workaround for x64 SEH: refresh the saved jmpbuf's frame pointer so
         * longjmp does not try to unwind frames that no longer exist */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine_stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine_stack_src, cont->machine_stack,
               VALUE, cont->machine_stack_size);
    }

#ifdef __ia64
    if (cont->machine_register_stack_src) {
        MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
               VALUE, cont->machine_register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}

NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

#ifdef __ia64
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{}
#endif
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use as many registers as possible so the register stack grows */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif

static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine_stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine_stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                (void)sp;
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine_stack_src + cont->machine_stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                (void)sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
#ifdef __ia64
#define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
#endif

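/*
 *  callcc {|cont| block }  ->  obj
 *
 *  Generates a Continuation object and passes it to the block.  Calling
 *  cont.call(args...) later jumps back here and makes callcc return those
 *  arguments (nil for none, the single value for one, an Array otherwise;
 *  see make_passing_arg()).  If the continuation is never called, callcc
 *  returns the value of the block.
 */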
static VALUE
rb_callcc(VALUE self)
{
    volatile int called;
    volatile VALUE val = cont_capture(&called);

    if (called) {
        return val;
    }
    else {
        return rb_yield(val);
    }
}

static VALUE
make_passing_arg(int argc, VALUE *argv)
{
    switch (argc) {
      case 0:
        return Qnil;
      case 1:
        return argv[0];
      default:
        return rb_ary_new4(argc, argv);
    }
}

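/*
 *  cont.call(args, ...)
 *  cont[args, ...]
 *
 *  Invokes the continuation: execution resumes at the point where the
 *  continuation was captured by callcc, and the arguments (packed by
 *  make_passing_arg()) become the return value of that callcc.  Raises
 *  RuntimeError when called from another thread, across a stack rewinding
 *  barrier, or from a fiber other than the one that captured it.
 */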
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_thread.fiber) {
        rb_fiber_t *fcont;
        GetFiberPtr(cont->saved_thread.fiber, fcont);

        if (th->fiber != cont->saved_thread.fiber) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable */
}

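/*
 *  Fibers are lightweight, manually scheduled coroutines.  Each Fiber
 *  carries its own VM stack (FIBER_VM_STACK_SIZE VALUEs) and, once it has
 *  run, a heap copy of its machine stack; switching is implemented below
 *  with setjmp/longjmp plus explicit stack copying
 *  (cont_save_machine_stack() / cont_restore_1()).
 *
 *  A minimal usage sketch (illustrative only, not taken from this file):
 *
 *     fib = Fiber.new do |x|
 *       Fiber.yield x + 1   # suspends; x + 1 is returned from resume
 *     end
 *     fib.resume(1)         #=> 2
 */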
#define FIBER_VM_STACK_SIZE (4 * 1024)

static const rb_data_type_t fiber_data_type = {
    "fiber",
    fiber_mark, fiber_free, fiber_memsize,
};

static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}

static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    fib = ALLOC(rb_fiber_t);
    memset(fib, 0, sizeof(rb_fiber_t));
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    cont_init(&fib->cont, th);
    fib->prev = Qnil;
    fib->status = CREATED;

    DATA_PTR(fibval) = fib;

    return fib;
}

static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = &cont->saved_thread;

    /* give the saved thread a fresh VM stack and a dummy control frame;
     * the machine stack is captured later, on the first switch */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    th->stack_size = FIBER_VM_STACK_SIZE;
    th->stack = ALLOC_N(VALUE, th->stack_size);

    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
    th->cfp->bp = 0;
    th->cfp->lfp = th->stack;
    *th->cfp->lfp = 0;
    th->cfp->dfp = th->stack;
    th->cfp->self = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    th->first_proc = proc;

    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);

    return fibval;
}

static VALUE
rb_fiber_init(VALUE fibval)
{
    return fiber_init(fibval, rb_block_proc());
}

VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
    return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
}

static VALUE
return_fiber(void)
{
    rb_fiber_t *fib;
    VALUE curr = rb_fiber_current();
    GetFiberPtr(curr, fib);

    if (fib->prev == Qnil) {
        rb_thread_t *th = GET_THREAD();

        if (th->root_fiber != curr) {
            return th->root_fiber;
        }
        else {
            rb_raise(rb_eFiberError, "can't yield from root fiber");
        }
    }
    else {
        VALUE prev = fib->prev;
        fib->prev = Qnil;
        return prev;
    }
}

VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);

static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
    rb_fiber_transfer(return_fiber(), 1, &value);
}

void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        int argc;
        VALUE *argv, args;
        GetProcPtr(cont->saved_thread.first_proc, proc);
        args = cont->value;
        argv = (argc = cont->argc) > 1 ? RARRAY_PTR(args) : &args;
        cont->value = Qnil;
        th->errinfo = Qnil;
        th->local_lfp = proc->block.lfp;
        th->local_svar = Qnil;

        fib->status = RUNNING;
        cont->value = rb_vm_invoke_proc(th, proc, proc->block.self, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
        if (state == TAG_RAISE) {
            th->thrown_errinfo = th->errinfo;
        }
        else {
            th->thrown_errinfo =
                rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
        }
        RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}

static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;

    /* no need to allocate a VM stack: the root fiber runs on the thread's own stacks */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}

VALUE
rb_fiber_current(void)
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
        /* save the current context as the root fiber */
        rb_fiber_t *fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }
    return th->fiber;
}

static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
        GetFiberPtr(th->fiber, fib);
        fib->cont.saved_thread = *th;
    }
    else {
        /* create the root fiber on first use */
        fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }

    cont_save_machine_stack(th, &fib->cont);

    if (ruby_setjmp(fib->cont.jmpbuf)) {
        /* resumed: control came back to this fiber */
        GetFiberPtr(th->fiber, fib);
        if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
        return fib->cont.value;
    }
    else {
        return Qundef;
    }
}

static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");
        if (th->fiber != fibval) {
            GetFiberPtr(th->fiber, fib);
            if (fib->status != TERMINATED) rb_exc_raise(value);
            fibval = th->root_fiber;
        }
        else {
            fibval = fib->prev;
            if (NIL_P(fibval)) fibval = th->root_fiber;
        }
        GetFiberPtr(fibval, fib);
        cont = &fib->cont;
        cont->argc = -1;
        cont->value = value;
        cont_restore_0(cont, &value);
        /* unreachable */
    }

    if (is_resume) {
        fib->prev = rb_fiber_current();
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    if ((value = fiber_store(fib)) == Qundef) {
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }

    RUBY_VM_CHECK_INTS();

    return value;
}

VALUE
rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
{
    return fiber_switch(fib, argc, argv, 0);
}

VALUE
rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);

    if (fib->prev != Qnil) {
        rb_raise(rb_eFiberError, "double resume");
    }

    return fiber_switch(fibval, argc, argv, 1);
}

VALUE
rb_fiber_yield(int argc, VALUE *argv)
{
    return rb_fiber_transfer(return_fiber(), argc, argv);
}

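/*
 *  fiber.alive?  ->  true or false
 *
 *  Returns true unless the fiber has finished (status TERMINATED), i.e.
 *  it can still be resumed or transferred to.
 */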
VALUE
rb_fiber_alive_p(VALUE fibval)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    return fib->status != TERMINATED ? Qtrue : Qfalse;
}

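/*
 *  fiber.resume(args, ...)  ->  obj
 *
 *  Resumes the fiber from the point at which the last Fiber.yield was
 *  called, or runs the block from the start if the fiber has never run.
 *  The arguments become the return value of Fiber.yield (or the block
 *  parameters on the first resume); the value passed to the next
 *  Fiber.yield, or the block's result, is returned here.  Raises
 *  FiberError ("double resume") if the fiber is already being resumed,
 *  and "dead fiber called" if it has terminated.
 */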
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_resume(fib, argc, argv);
}

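/*
 *  fiber.transfer(args, ...)  ->  obj
 *
 *  Switches to the fiber without recording a return fiber (fib->prev is
 *  left untouched), so the target must explicitly transfer back; a plain
 *  Fiber.yield inside a transferred-to fiber returns to the root fiber
 *  rather than to the caller (see return_fiber()).  Defined by
 *  ruby_Init_Fiber_as_Coroutine().
 */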
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_transfer(fib, argc, argv);
}

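/*
 *  Fiber.yield(args, ...)  ->  obj
 *
 *  Suspends the current fiber and switches back to the fiber that resumed
 *  it (see return_fiber()).  The arguments are returned from that fiber's
 *  resume call; the value passed to the next resume becomes the return
 *  value of Fiber.yield.  Raises FiberError when called from the root
 *  fiber.
 */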
static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield(argc, argv);
}

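/*
 *  Fiber.current  ->  fiber
 *
 *  Returns the fiber that is currently running.  If no fiber has been
 *  created on this thread yet, a ROOT_FIBER_CONTEXT fiber is allocated
 *  lazily to represent the thread's main execution context.
 */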
static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}

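/*
 *  Continuation objects can only be created through callcc; allocation and
 *  Continuation.new are explicitly undefined below.  Fiber and
 *  Continuation support are registered by separate Init functions
 *  (Init_Cont, ruby_Init_Continuation_body, ruby_Init_Fiber_as_Coroutine),
 *  so each feature can be set up independently.
 */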
void
Init_Cont(void)
{
    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}

void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}

void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}