00014 #include "ruby/ruby.h"
00015 #include "ruby/st.h"
00016 #include "ruby/re.h"
00017 #include "ruby/io.h"
00018 #include "ruby/util.h"
00019 #include "eval_intern.h"
00020 #include "vm_core.h"
00021 #include "gc.h"
00022 #include <stdio.h>
00023 #include <setjmp.h>
00024 #include <sys/types.h>
00025
00026 #ifdef HAVE_SYS_TIME_H
00027 #include <sys/time.h>
00028 #endif
00029
00030 #ifdef HAVE_SYS_RESOURCE_H
00031 #include <sys/resource.h>
00032 #endif
00033
00034 #if defined _WIN32 || defined __CYGWIN__
00035 #include <windows.h>
00036 #endif
00037
00038 #ifdef HAVE_VALGRIND_MEMCHECK_H
00039 # include <valgrind/memcheck.h>
00040 # ifndef VALGRIND_MAKE_MEM_DEFINED
00041 # define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE(p, n)
00042 # endif
00043 # ifndef VALGRIND_MAKE_MEM_UNDEFINED
00044 # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE(p, n)
00045 # endif
00046 #else
00047 # define VALGRIND_MAKE_MEM_DEFINED(p, n)
00048 # define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
00049 #endif
00050
00051 int rb_io_fptr_finalize(struct rb_io_t*);
00052
00053 #define rb_setjmp(env) RUBY_SETJMP(env)
00054 #define rb_jmp_buf rb_jmpbuf_t
00055
00056
00057 #ifdef __GNUC__
00058 # ifndef atarist
00059 # ifndef alloca
00060 # define alloca __builtin_alloca
00061 # endif
00062 # endif
00063 #else
00064 # ifdef HAVE_ALLOCA_H
00065 # include <alloca.h>
00066 # else
00067 # ifdef _AIX
00068 #pragma alloca
00069 # else
00070 # ifndef alloca
00071 void *alloca ();
00072 # endif
00073 # endif
00074 # endif
00075 #endif
00076
00077 #ifndef GC_MALLOC_LIMIT
00078 #define GC_MALLOC_LIMIT 8000000
00079 #endif
00080
00081 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
00082
00083 #define MARK_STACK_MAX 1024
00084
00085 int ruby_gc_debug_indent = 0;
00086
00087
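/* Set GC_PROFILE_MORE_DETAIL to 1 to additionally record per-phase
 * mark/sweep times and heap occupancy in each profile record. */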
00088 #define GC_PROFILE_MORE_DETAIL 0
00089 typedef struct gc_profile_record {
00090 double gc_time;
00091 double gc_mark_time;
00092 double gc_sweep_time;
00093 double gc_invoke_time;
00094
00095 size_t heap_use_slots;
00096 size_t heap_live_objects;
00097 size_t heap_free_objects;
00098 size_t heap_total_objects;
00099 size_t heap_use_size;
00100 size_t heap_total_size;
00101
00102 int have_finalize;
00103
00104 size_t allocate_increase;
00105 size_t allocate_limit;
00106 } gc_profile_record;
00107
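/* Returns the process's user CPU time in seconds: via getrusage() where
 * RUSAGE_SELF is available, via GetProcessTimes() on Windows, and 0.0 as
 * a fallback elsewhere. */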
00108 static double
00109 getrusage_time(void)
00110 {
00111 #ifdef RUSAGE_SELF
00112 struct rusage usage;
00113 struct timeval time;
00114 getrusage(RUSAGE_SELF, &usage);
00115 time = usage.ru_utime;
00116 return time.tv_sec + time.tv_usec * 1e-6;
00117 #elif defined _WIN32
00118 FILETIME creation_time, exit_time, kernel_time, user_time;
00119 ULARGE_INTEGER ui;
00120 LONG_LONG q;
00121 double t;
00122
00123 if (GetProcessTimes(GetCurrentProcess(),
00124 &creation_time, &exit_time, &kernel_time, &user_time) == 0)
00125 {
00126 return 0.0;
00127 }
00128 memcpy(&ui, &user_time, sizeof(FILETIME));
00129 q = ui.QuadPart / 10L;
00130 t = (DWORD)(q % 1000000L) * 1e-6;
00131 q /= 1000000L;
00132 #ifdef __GNUC__
00133 t += q;
00134 #else
00135 t += (double)(DWORD)(q >> 16) * (1 << 16);
00136 t += (DWORD)q & ~(~0 << 16);
00137 #endif
00138 return t;
00139 #else
00140 return 0.0;
00141 #endif
00142 }
00143
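/* GC_PROF_TIMER_START/STOP bracket one GC run: they grow the profile
 * record array in chunks of 1000 entries on demand and charge the run's
 * elapsed user CPU time to the current record. */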
00144 #define GC_PROF_TIMER_START do {\
00145 if (objspace->profile.run) {\
00146 if (!objspace->profile.record) {\
00147 objspace->profile.size = 1000;\
00148 objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
00149 }\
00150 if (count >= objspace->profile.size) {\
00151 objspace->profile.size += 1000;\
00152 objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
00153 }\
00154 if (!objspace->profile.record) {\
00155 rb_bug("gc_profile malloc or realloc miss");\
00156 }\
00157 MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
00158 gc_time = getrusage_time();\
00159 objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
00160 }\
00161 } while(0)
00162
00163 #define GC_PROF_TIMER_STOP do {\
00164 if (objspace->profile.run) {\
00165 gc_time = getrusage_time() - gc_time;\
00166 if (gc_time < 0) gc_time = 0;\
00167 objspace->profile.record[count].gc_time = gc_time;\
00168 objspace->profile.count++;\
00169 }\
00170 } while(0)
00171
00172 #if GC_PROFILE_MORE_DETAIL
00173 #define INIT_GC_PROF_PARAMS double gc_time = 0, mark_time = 0, sweep_time = 0;\
00174 size_t count = objspace->profile.count
00175
00176 #define GC_PROF_MARK_TIMER_START do {\
00177 if (objspace->profile.run) {\
00178 mark_time = getrusage_time();\
00179 }\
00180 } while(0)
00181
00182 #define GC_PROF_MARK_TIMER_STOP do {\
00183 if (objspace->profile.run) {\
00184 mark_time = getrusage_time() - mark_time;\
00185 if (mark_time < 0) mark_time = 0;\
00186 objspace->profile.record[count].gc_mark_time = mark_time;\
00187 }\
00188 } while(0)
00189
00190 #define GC_PROF_SWEEP_TIMER_START do {\
00191 if (objspace->profile.run) {\
00192 sweep_time = getrusage_time();\
00193 }\
00194 } while(0)
00195
00196 #define GC_PROF_SWEEP_TIMER_STOP do {\
00197 if (objspace->profile.run) {\
00198 sweep_time = getrusage_time() - sweep_time;\
00199 if (sweep_time < 0) sweep_time = 0;\
00200 objspace->profile.record[count].gc_sweep_time = sweep_time;\
00201 }\
00202 } while(0)
00203 #define GC_PROF_SET_MALLOC_INFO do {\
00204 if (objspace->profile.run) {\
00205 gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
00206 record->allocate_increase = malloc_increase;\
00207 record->allocate_limit = malloc_limit; \
00208 }\
00209 } while(0)
00210 #define GC_PROF_SET_HEAP_INFO do {\
00211 if (objspace->profile.run) {\
00212 gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
00213 record->heap_use_slots = heaps_used;\
00214 record->heap_live_objects = live;\
00215 record->heap_free_objects = freed; \
00216 record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
00217 record->have_finalize = final_list ? Qtrue : Qfalse;\
00218 record->heap_use_size = live * sizeof(RVALUE); \
00219 record->heap_total_size = heaps_used * (HEAP_OBJ_LIMIT * sizeof(RVALUE));\
00220 }\
00221 } while(0)
00222 #else
00223 #define INIT_GC_PROF_PARAMS double gc_time = 0;\
00224 size_t count = objspace->profile.count
00225 #define GC_PROF_MARK_TIMER_START
00226 #define GC_PROF_MARK_TIMER_STOP
00227 #define GC_PROF_SWEEP_TIMER_START
00228 #define GC_PROF_SWEEP_TIMER_STOP
00229 #define GC_PROF_SET_MALLOC_INFO
00230 #define GC_PROF_SET_HEAP_INFO do {\
00231 if (objspace->profile.run) {\
00232 gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
00233 record->heap_total_objects = heaps_used * HEAP_OBJ_LIMIT;\
00234 record->heap_use_size = live * sizeof(RVALUE); \
00235 record->heap_total_size = heaps_used * HEAP_SIZE;\
00236 }\
00237 } while(0)
00238 #endif
00239
00240
00241 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
00242 #pragma pack(push, 1)
00243 #endif
00244
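/* An object slot.  Every Ruby heap object is one RVALUE: a union of all
 * built-in object layouts.  Unused slots are threaded onto the freelist
 * through as.free.next, with as.free.flags == 0 marking them as free. */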
00245 typedef struct RVALUE {
00246 union {
00247 struct {
00248 VALUE flags;
00249 struct RVALUE *next;
00250 } free;
00251 struct RBasic basic;
00252 struct RObject object;
00253 struct RClass klass;
00254 struct RFloat flonum;
00255 struct RString string;
00256 struct RArray array;
00257 struct RRegexp regexp;
00258 struct RHash hash;
00259 struct RData data;
00260 struct RTypedData typeddata;
00261 struct RStruct rstruct;
00262 struct RBignum bignum;
00263 struct RFile file;
00264 struct RNode node;
00265 struct RMatch match;
00266 struct RRational rational;
00267 struct RComplex complex;
00268 } as;
00269 #ifdef GC_DEBUG
00270 const char *file;
00271 int line;
00272 #endif
00273 } RVALUE;
00274
00275 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
00276 #pragma pack(pop)
00277 #endif
00278
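/* One heap slab.  membase is the raw malloc()ed block, slot the first
 * RVALUE-aligned cell inside it, and limit the number of usable cells;
 * finalize_flag marks slabs whose remaining objects await finalizers. */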
00279 struct heaps_slot {
00280 void *membase;
00281 RVALUE *slot;
00282 size_t limit;
00283 int finalize_flag;
00284 };
00285
00286 #define HEAP_MIN_SLOTS 10000
00287 #define FREE_MIN 4096
00288
00289 struct gc_list {
00290 VALUE *varptr;
00291 struct gc_list *next;
00292 };
00293
00294 #define CALC_EXACT_MALLOC_SIZE 0
00295
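/* All global GC state, bundled so that (with ENABLE_VM_OBJSPACE) each VM
 * can carry its own object space. */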
00296 typedef struct rb_objspace {
00297 struct {
00298 size_t limit;
00299 size_t increase;
00300 #if CALC_EXACT_MALLOC_SIZE
00301 size_t allocated_size;
00302 size_t allocations;
00303 #endif
00304 } malloc_params;
00305 struct {
00306 size_t increment;
00307 struct heaps_slot *ptr;
00308 size_t length;
00309 size_t used;
00310 RVALUE *freelist;
00311 RVALUE *range[2];
00312 RVALUE *freed;
00313 } heap;
00314 struct {
00315 int dont_gc;
00316 int during_gc;
00317 } flags;
00318 struct {
00319 st_table *table;
00320 RVALUE *deferred;
00321 } final;
00322 struct {
00323 VALUE buffer[MARK_STACK_MAX];
00324 VALUE *ptr;
00325 int overflow;
00326 } markstack;
00327 struct {
00328 int run;
00329 gc_profile_record *record;
00330 size_t count;
00331 size_t size;
00332 double invoke_time;
00333 } profile;
00334 struct gc_list *global_list;
00335 unsigned int count;
00336 int gc_stress;
00337 } rb_objspace_t;
00338
00339 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00340 #define rb_objspace (*GET_VM()->objspace)
00341 static int ruby_initial_gc_stress = 0;
00342 int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
00343 #else
00344 static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
00345 int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
00346 #endif
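/* Shorthand accessors for the current object space; all of them assume a
 * local variable named `objspace` is in scope. */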
00347 #define malloc_limit objspace->malloc_params.limit
00348 #define malloc_increase objspace->malloc_params.increase
00350 #define heaps objspace->heap.ptr
00351 #define heaps_length objspace->heap.length
00352 #define heaps_used objspace->heap.used
00353 #define freelist objspace->heap.freelist
00354 #define lomem objspace->heap.range[0]
00355 #define himem objspace->heap.range[1]
00356 #define heaps_inc objspace->heap.increment
00357 #define heaps_freed objspace->heap.freed
00358 #define dont_gc objspace->flags.dont_gc
00359 #define during_gc objspace->flags.during_gc
00360 #define finalizer_table objspace->final.table
00361 #define deferred_final_list objspace->final.deferred
00362 #define mark_stack objspace->markstack.buffer
00363 #define mark_stack_ptr objspace->markstack.ptr
00364 #define mark_stack_overflow objspace->markstack.overflow
00365 #define global_List objspace->global_list
00366 #define ruby_gc_stress objspace->gc_stress
00367
00368 #define need_call_final (finalizer_table && finalizer_table->num_entries)
00369
00370 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
00371
00372 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00373 rb_objspace_t *
00374 rb_objspace_alloc(void)
00375 {
00376 rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
00377 memset(objspace, 0, sizeof(*objspace));
00378 malloc_limit = GC_MALLOC_LIMIT;
00379 ruby_gc_stress = ruby_initial_gc_stress;
00380
00381 return objspace;
00382 }
00383
00384 void
00385 rb_objspace_free(rb_objspace_t *objspace)
00386 {
00387 rb_objspace_call_finalizer(objspace);
00388 if (objspace->profile.record) {
00389 free(objspace->profile.record);
00390 objspace->profile.record = 0;
00391 }
00392 if (global_List) {
00393 struct gc_list *list, *next;
00394 for (list = global_List; list; list = next) {
00395 next = list->next;
00396 free(list);
00397 }
00398 }
00399 if (heaps) {
00400 size_t i;
00401 for (i = 0; i < heaps_used; ++i) {
00402 free(heaps[i].membase);
00403 }
00404 free(heaps);
00405 heaps_used = 0;
00406 heaps = 0;
00407 }
00408 free(objspace);
00409 }
00410 #endif
00411
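/* Each heap slab is 16KB; HEAP_OBJ_LIMIT below is the number of RVALUEs
 * that fit into one slab. */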
00420 #define HEAP_SIZE 0x4000
00428 #define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE))
00429
00430 extern VALUE rb_cMutex;
00431 extern st_table *rb_class_tbl;
00432
00433 int ruby_disable_gc_stress = 0;
00434
00435 static void run_final(rb_objspace_t *objspace, VALUE obj);
00436 static int garbage_collect(rb_objspace_t *objspace);
00437
00438 void
00439 rb_global_variable(VALUE *var)
00440 {
00441 rb_gc_register_address(var);
00442 }
00443
00444 static void *
00445 ruby_memerror_body(void *dummy)
00446 {
00447 rb_memerror();
00448 return 0;
00449 }
00450
00451 static void
00452 ruby_memerror(void)
00453 {
00454 if (ruby_thread_has_gvl_p()) {
00455 rb_memerror();
00456 }
00457 else {
00458 if (ruby_native_thread_p()) {
00459 rb_thread_call_with_gvl(ruby_memerror_body, 0);
00460 }
00461 else {
/* no ruby thread: cannot raise, so report and abort */
00463 fprintf(stderr, "[FATAL] failed to allocate memory\n");
00464 exit(EXIT_FAILURE);
00465 }
00466 }
00467 }
00468
00469 void
00470 rb_memerror(void)
00471 {
00472 rb_thread_t *th = GET_THREAD();
00473 if (!nomem_error ||
00474 (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
00475 fprintf(stderr, "[FATAL] failed to allocate memory\n");
00476 exit(EXIT_FAILURE);
00477 }
00478 if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
00479 rb_thread_raised_clear(th);
00480 GET_THREAD()->errinfo = nomem_error;
00481 JUMP_TAG(TAG_RAISE);
00482 }
00483 rb_thread_raised_set(th, RAISED_NOMEMORY);
00484 rb_exc_raise(nomem_error);
00485 }
00486
/*
 *  call-seq:
 *    GC.stress                 -> true or false
 *
 *  Returns the current status of GC stress mode.
 */
00494 static VALUE
00495 gc_stress_get(VALUE self)
00496 {
00497 rb_objspace_t *objspace = &rb_objspace;
00498 return ruby_gc_stress ? Qtrue : Qfalse;
00499 }
00500
/*
 *  call-seq:
 *    GC.stress = bool          -> bool
 *
 *  Updates the GC stress mode.
 *
 *  When stress mode is enabled, the GC is invoked at every GC opportunity:
 *  all memory and object allocations.
 *
 *  Enabling stress mode degrades performance; it is only for debugging.
 */
00513 static VALUE
00514 gc_stress_set(VALUE self, VALUE flag)
00515 {
00516 rb_objspace_t *objspace = &rb_objspace;
00517 rb_secure(2);
00518 ruby_gc_stress = RTEST(flag);
00519 return flag;
00520 }
/*
 *  call-seq:
 *    GC::Profiler.enabled?     -> true or false
 *
 *  The current status of GC profile mode.
 */
static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return objspace->profile.run ? Qtrue : Qfalse;
}
00535
/*
 *  call-seq:
 *    GC::Profiler.enable       -> nil
 *
 *  Starts the GC profiler.
 */
00545 static VALUE
00546 gc_profile_enable(void)
00547 {
00548 rb_objspace_t *objspace = &rb_objspace;
00549
00550 objspace->profile.run = TRUE;
00551 return Qnil;
00552 }
00553
/*
 *  call-seq:
 *    GC::Profiler.disable      -> nil
 *
 *  Stops the GC profiler.
 */
00563 static VALUE
00564 gc_profile_disable(void)
00565 {
00566 rb_objspace_t *objspace = &rb_objspace;
00567
00568 objspace->profile.run = FALSE;
00569 return Qnil;
00570 }
/*
 *  call-seq:
 *    GC::Profiler.clear        -> nil
 *
 *  Clears the GC profiler data.
 */
static VALUE
gc_profile_clear(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (objspace->profile.record) {
        MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
    }
    objspace->profile.count = 0;
    return Qnil;
}
00588
00589 static void *
00590 negative_size_allocation_error_with_gvl(void *ptr)
00591 {
00592 rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
00593 return 0;
00594 }
00595
00596 static void
00597 negative_size_allocation_error(const char *msg)
00598 {
00599 if (ruby_thread_has_gvl_p()) {
00600 rb_raise(rb_eNoMemError, "%s", msg);
00601 }
00602 else {
00603 if (ruby_native_thread_p()) {
00604 rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
00605 }
00606 else {
00607 fprintf(stderr, "[FATAL] %s\n", msg);
00608 exit(EXIT_FAILURE);
00609 }
00610 }
00611 }
00612
00613 static void *
00614 gc_with_gvl(void *ptr)
00615 {
00616 return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
00617 }
00618
00619 static int
00620 garbage_collect_with_gvl(rb_objspace_t *objspace)
00621 {
00622 if (dont_gc) return TRUE;
00623 if (ruby_thread_has_gvl_p()) {
00624 return garbage_collect(objspace);
00625 }
00626 else {
00627 if (ruby_native_thread_p()) {
00628 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
00629 }
00630 else {
/* no ruby thread: cannot acquire the GVL, so report and abort */
00632 fprintf(stderr, "[FATAL] failed to allocate memory\n");
00633 exit(EXIT_FAILURE);
00634 }
00635 }
00636 }
00637
00638 static void vm_xfree(rb_objspace_t *objspace, void *ptr);
00639
00640 static void *
00641 vm_xmalloc(rb_objspace_t *objspace, size_t size)
00642 {
00643 void *mem;
00644
00645 if ((ssize_t)size < 0) {
00646 negative_size_allocation_error("negative allocation size (or too big)");
00647 }
00648 if (size == 0) size = 1;
00649
00650 #if CALC_EXACT_MALLOC_SIZE
00651 size += sizeof(size_t);
00652 #endif
00653
00654 if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
00655 (malloc_increase+size) > malloc_limit) {
00656 garbage_collect_with_gvl(objspace);
00657 }
00658 mem = malloc(size);
00659 if (!mem) {
00660 if (garbage_collect_with_gvl(objspace)) {
00661 mem = malloc(size);
00662 }
00663 if (!mem) {
00664 ruby_memerror();
00665 }
00666 }
00667 malloc_increase += size;
00668
00669 #if CALC_EXACT_MALLOC_SIZE
00670 objspace->malloc_params.allocated_size += size;
00671 objspace->malloc_params.allocations++;
00672 ((size_t *)mem)[0] = size;
00673 mem = (size_t *)mem + 1;
00674 #endif
00675
00676 return mem;
00677 }
00678
00679 static void *
00680 vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
00681 {
00682 void *mem;
00683
00684 if ((ssize_t)size < 0) {
00685 negative_size_allocation_error("negative re-allocation size");
00686 }
00687 if (!ptr) return vm_xmalloc(objspace, size);
00688 if (size == 0) {
00689 vm_xfree(objspace, ptr);
00690 return 0;
00691 }
00692 if (ruby_gc_stress && !ruby_disable_gc_stress)
00693 garbage_collect_with_gvl(objspace);
00694
00695 #if CALC_EXACT_MALLOC_SIZE
00696 size += sizeof(size_t);
00697 objspace->malloc_params.allocated_size -= size;
00698 ptr = (size_t *)ptr - 1;
00699 #endif
00700
00701 mem = realloc(ptr, size);
00702 if (!mem) {
00703 if (garbage_collect_with_gvl(objspace)) {
00704 mem = realloc(ptr, size);
00705 }
00706 if (!mem) {
00707 ruby_memerror();
00708 }
00709 }
00710 malloc_increase += size;
00711
00712 #if CALC_EXACT_MALLOC_SIZE
00713 objspace->malloc_params.allocated_size += size;
00714 ((size_t *)mem)[0] = size;
00715 mem = (size_t *)mem + 1;
00716 #endif
00717
00718 return mem;
00719 }
00720
00721 static void
00722 vm_xfree(rb_objspace_t *objspace, void *ptr)
00723 {
00724 #if CALC_EXACT_MALLOC_SIZE
00725 size_t size;
00726 ptr = ((size_t *)ptr) - 1;
00727 size = ((size_t*)ptr)[0];
00728 objspace->malloc_params.allocated_size -= size;
00729 objspace->malloc_params.allocations--;
00730 #endif
00731
00732 free(ptr);
00733 }
00734
00735 void *
00736 ruby_xmalloc(size_t size)
00737 {
00738 return vm_xmalloc(&rb_objspace, size);
00739 }
00740
00741 void *
00742 ruby_xmalloc2(size_t n, size_t size)
00743 {
00744 size_t len = size * n;
00745 if (n != 0 && size != len / n) {
00746 rb_raise(rb_eArgError, "malloc: possible integer overflow");
00747 }
00748 return vm_xmalloc(&rb_objspace, len);
00749 }
00750
00751 void *
00752 ruby_xcalloc(size_t n, size_t size)
00753 {
00754 void *mem = ruby_xmalloc2(n, size);
00755 memset(mem, 0, n * size);
00756
00757 return mem;
00758 }
00759
00760 void *
00761 ruby_xrealloc(void *ptr, size_t size)
00762 {
00763 return vm_xrealloc(&rb_objspace, ptr, size);
00764 }
00765
00766 void *
00767 ruby_xrealloc2(void *ptr, size_t n, size_t size)
00768 {
00769 size_t len = size * n;
00770 if (n != 0 && size != len / n) {
00771 rb_raise(rb_eArgError, "realloc: possible integer overflow");
00772 }
00773 return ruby_xrealloc(ptr, len);
00774 }
00775
00776 void
00777 ruby_xfree(void *x)
00778 {
00779 if (x)
00780 vm_xfree(&rb_objspace, x);
00781 }
00782
/*
 *  call-seq:
 *    GC.enable    -> true or false
 *
 *  Enables garbage collection, returning <code>true</code> if garbage
 *  collection was previously disabled.
 *
 *     GC.disable   #=> false
 *     GC.enable    #=> true
 *     GC.enable    #=> false
 */
00797 VALUE
00798 rb_gc_enable(void)
00799 {
00800 rb_objspace_t *objspace = &rb_objspace;
00801 int old = dont_gc;
00802
00803 dont_gc = FALSE;
00804 return old ? Qtrue : Qfalse;
00805 }
/*
 *  call-seq:
 *    GC.disable    -> true or false
 *
 *  Disables garbage collection, returning <code>true</code> if garbage
 *  collection was already disabled.
 *
 *     GC.disable   #=> false
 *     GC.disable   #=> true
 */
00819 VALUE
00820 rb_gc_disable(void)
00821 {
00822 rb_objspace_t *objspace = &rb_objspace;
00823 int old = dont_gc;
00824
00825 dont_gc = TRUE;
00826 return old ? Qtrue : Qfalse;
00827 }
00828
00829 VALUE rb_mGC;
00830
00831 void
00832 rb_gc_register_mark_object(VALUE obj)
00833 {
00834 VALUE ary = GET_THREAD()->vm->mark_object_ary;
00835 rb_ary_push(ary, obj);
00836 }
00837
00838 void
00839 rb_gc_register_address(VALUE *addr)
00840 {
00841 rb_objspace_t *objspace = &rb_objspace;
00842 struct gc_list *tmp;
00843
00844 tmp = ALLOC(struct gc_list);
00845 tmp->next = global_List;
00846 tmp->varptr = addr;
00847 global_List = tmp;
00848 }
00849
00850 void
00851 rb_gc_unregister_address(VALUE *addr)
00852 {
00853 rb_objspace_t *objspace = &rb_objspace;
00854 struct gc_list *tmp = global_List;
00855
00856 if (tmp->varptr == addr) {
00857 global_List = tmp->next;
00858 xfree(tmp);
00859 return;
00860 }
00861 while (tmp->next) {
00862 if (tmp->next->varptr == addr) {
00863 struct gc_list *t = tmp->next;
00864
00865 tmp->next = tmp->next->next;
00866 xfree(t);
00867 break;
00868 }
00869 tmp = tmp->next;
00870 }
00871 }
00872
00873
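/* Grow (or create) the directory of heap slabs to hold next_heaps_length
 * entries. */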
00874 static void
00875 allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
00876 {
00877 struct heaps_slot *p;
00878 size_t size;
00879
00880 size = next_heaps_length*sizeof(struct heaps_slot);
00881
00882 if (heaps_used > 0) {
00883 p = (struct heaps_slot *)realloc(heaps, size);
00884 if (p) heaps = p;
00885 }
00886 else {
00887 p = heaps = (struct heaps_slot *)malloc(size);
00888 }
00889
00890 if (p == 0) {
00891 during_gc = 0;
00892 rb_memerror();
00893 }
00894 heaps_length = next_heaps_length;
00895 }
00896
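/* Allocate one slab, align its cells to sizeof(RVALUE), insert it into the
 * membase-sorted heaps array by binary search, and push every cell onto the
 * freelist. */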
00897 static void
00898 assign_heap_slot(rb_objspace_t *objspace)
00899 {
00900 RVALUE *p, *pend, *membase;
00901 size_t hi, lo, mid;
00902 size_t objs;
00903
00904 objs = HEAP_OBJ_LIMIT;
00905 p = (RVALUE*)malloc(HEAP_SIZE);
00906
00907 if (p == 0) {
00908 during_gc = 0;
00909 rb_memerror();
00910 }
00911
00912 membase = p;
00913 if ((VALUE)p % sizeof(RVALUE) != 0) {
00914 p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
00915 if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
00916 objs--;
00917 }
00918 }
00919
00920 lo = 0;
00921 hi = heaps_used;
00922 while (lo < hi) {
00923 register RVALUE *mid_membase;
00924 mid = (lo + hi) / 2;
00925 mid_membase = heaps[mid].membase;
00926 if (mid_membase < membase) {
00927 lo = mid + 1;
00928 }
00929 else if (mid_membase > membase) {
00930 hi = mid;
00931 }
00932 else {
00933 rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
00934 }
00935 }
00936 if (hi < heaps_used) {
00937 MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
00938 }
00939 heaps[hi].membase = membase;
00940 heaps[hi].slot = p;
00941 heaps[hi].limit = objs;
00942 heaps[hi].finalize_flag = FALSE;
00943 pend = p + objs;
00944 if (lomem == 0 || lomem > p) lomem = p;
00945 if (himem < pend) himem = pend;
00946 heaps_used++;
00947
00948 while (p < pend) {
00949 p->as.free.flags = 0;
00950 p->as.free.next = freelist;
00951 freelist = p;
00952 p++;
00953 }
00954 }
00955
00956 static void
00957 init_heap(rb_objspace_t *objspace)
00958 {
00959 size_t add, i;
00960
00961 add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT;
00962
00963 if (!add) {
00964 add = 1;
00965 }
00966
00967 if ((heaps_used + add) > heaps_length) {
00968 allocate_heaps(objspace, heaps_used + add);
00969 }
00970
00971 for (i = 0; i < add; i++) {
00972 assign_heap_slot(objspace);
00973 }
00974 heaps_inc = 0;
00975 objspace->profile.invoke_time = getrusage_time();
00976 }
00977
00978
00979 static void
00980 set_heaps_increment(rb_objspace_t *objspace)
00981 {
00982 size_t next_heaps_length = (size_t)(heaps_used * 1.8);
00983
00984 if (next_heaps_length == heaps_used) {
00985 next_heaps_length++;
00986 }
00987
00988 heaps_inc = next_heaps_length - heaps_used;
00989
00990 if (next_heaps_length > heaps_length) {
00991 allocate_heaps(objspace, next_heaps_length);
00992 }
00993 }
00994
00995 static int
00996 heaps_increment(rb_objspace_t *objspace)
00997 {
00998 if (heaps_inc > 0) {
00999 assign_heap_slot(objspace);
01000 heaps_inc--;
01001 return TRUE;
01002 }
01003 return FALSE;
01004 }
01005
01006 #define RANY(o) ((RVALUE*)(o))
01007
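/* Pop one cell off the freelist, growing the heap or collecting first when
 * the freelist is empty (or on every allocation in GC stress mode). */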
01008 static VALUE
01009 rb_newobj_from_heap(rb_objspace_t *objspace)
01010 {
01011 VALUE obj;
01012
01013 if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
01014 if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
01015 during_gc = 0;
01016 rb_memerror();
01017 }
01018 }
01019
01020 obj = (VALUE)freelist;
01021 freelist = freelist->as.free.next;
01022
01023 MEMZERO((void*)obj, RVALUE, 1);
01024 #ifdef GC_DEBUG
01025 RANY(obj)->file = rb_sourcefile();
01026 RANY(obj)->line = rb_sourceline();
01027 #endif
01028
01029 return obj;
01030 }
01031
01032 #if USE_VALUE_CACHE
01033 static VALUE
01034 rb_fill_value_cache(rb_thread_t *th)
01035 {
01036 rb_objspace_t *objspace = &rb_objspace;
01037 int i;
01038 VALUE rv;
01039
01040
01041 for (i=0; i<RUBY_VM_VALUE_CACHE_SIZE; i++) {
01042 VALUE v = rb_newobj_from_heap(objspace);
01043
01044 th->value_cache[i] = v;
01045 RBASIC(v)->flags = FL_MARK;
01046 }
01047 th->value_cache_ptr = &th->value_cache[0];
01048 rv = rb_newobj_from_heap(objspace);
01049
01050 return rv;
01051 }
01052 #endif
01053
01054 int
01055 rb_during_gc(void)
01056 {
01057 rb_objspace_t *objspace = &rb_objspace;
01058 return during_gc;
01059 }
01060
01061 VALUE
01062 rb_newobj(void)
01063 {
01064 #if USE_VALUE_CACHE || (defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE)
01065 rb_thread_t *th = GET_THREAD();
01066 #endif
01067 #if USE_VALUE_CACHE
01068 VALUE v = *th->value_cache_ptr;
01069 #endif
01070 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
01071 rb_objspace_t *objspace = th->vm->objspace;
01072 #else
01073 rb_objspace_t *objspace = &rb_objspace;
01074 #endif
01075
01076 if (during_gc) {
01077 dont_gc = 1;
01078 during_gc = 0;
01079 rb_bug("object allocation during garbage collection phase");
01080 }
01081
01082 #if USE_VALUE_CACHE
01083 if (v) {
01084 RBASIC(v)->flags = 0;
01085 th->value_cache_ptr++;
01086 }
01087 else {
01088 v = rb_fill_value_cache(th);
01089 }
01090
01091 #if defined(GC_DEBUG)
printf("cache index: %d, v: %p, th: %p\n",
       (int)(th->value_cache_ptr - th->value_cache), (void *)v, (void *)th);
01094 #endif
01095 return v;
01096 #else
01097 return rb_newobj_from_heap(objspace);
01098 #endif
01099 }
01100
01101 NODE*
01102 rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
01103 {
01104 NODE *n = (NODE*)rb_newobj();
01105
01106 n->flags |= T_NODE;
01107 nd_set_type(n, type);
01108
01109 n->u1.value = a0;
01110 n->u2.value = a1;
01111 n->u3.value = a2;
01112
01113 return n;
01114 }
01115
01116 VALUE
01117 rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
01118 {
01119 NEWOBJ(data, struct RData);
01120 if (klass) Check_Type(klass, T_CLASS);
01121 OBJSETUP(data, klass, T_DATA);
01122 data->data = datap;
01123 data->dfree = dfree;
01124 data->dmark = dmark;
01125
01126 return (VALUE)data;
01127 }
01128
01129 VALUE
01130 rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
01131 {
01132 NEWOBJ(data, struct RTypedData);
01133
01134 if (klass) Check_Type(klass, T_CLASS);
01135
01136 OBJSETUP(data, klass, T_DATA);
01137
01138 data->data = datap;
01139 data->typed_flag = 1;
01140 data->type = type;
01141
01142 return (VALUE)data;
01143 }
01144
01145 size_t
01146 rb_objspace_data_type_memsize(VALUE obj)
01147 {
01148 if (RTYPEDDATA_P(obj)) {
01149 return RTYPEDDATA_TYPE(obj)->dsize(RTYPEDDATA_DATA(obj));
01150 }
01151 else {
01152 return 0;
01153 }
01154 }
01155
01156 const char *
01157 rb_objspace_data_type_name(VALUE obj)
01158 {
01159 if (RTYPEDDATA_P(obj)) {
01160 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
01161 }
01162 else {
01163 return 0;
01164 }
01165 }
01166
01167 #ifdef __ia64
01168 #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
01169 #else
01170 #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
01171 #endif
01172
01173 #define STACK_START (th->machine_stack_start)
01174 #define STACK_END (th->machine_stack_end)
01175 #define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
01176
01177 #if STACK_GROW_DIRECTION < 0
01178 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
01179 #elif STACK_GROW_DIRECTION > 0
01180 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
01181 #else
01182 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
01183 : (size_t)(STACK_END - STACK_START + 1))
01184 #endif
01185 #if !STACK_GROW_DIRECTION
01186 int ruby_stack_grow_direction;
01187 int
01188 ruby_get_stack_grow_direction(volatile VALUE *addr)
01189 {
01190 VALUE *end;
01191 SET_MACHINE_STACK_END(&end);
01192
01193 if (end > addr) return ruby_stack_grow_direction = 1;
01194 return ruby_stack_grow_direction = -1;
01195 }
01196 #endif
01197
01198 #define GC_WATER_MARK 512
01199
01200 size_t
01201 ruby_stack_length(VALUE **p)
01202 {
01203 rb_thread_t *th = GET_THREAD();
01204 SET_STACK_END;
01205 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
01206 return STACK_LENGTH;
01207 }
01208
01209 static int
01210 stack_check(void)
01211 {
01212 int ret;
01213 rb_thread_t *th = GET_THREAD();
01214 SET_STACK_END;
01215 ret = STACK_LENGTH > STACK_LEVEL_MAX - GC_WATER_MARK;
01216 #ifdef __ia64
01217 if (!ret) {
01218 ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
01219 th->machine_register_stack_maxsize/sizeof(VALUE) - GC_WATER_MARK;
01220 }
01221 #endif
01222 return ret;
01223 }
01224
01225 int
01226 ruby_stack_check(void)
01227 {
01228 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
01229 return 0;
01230 #else
01231 return stack_check();
01232 #endif
01233 }
01234
01235 static void
01236 init_mark_stack(rb_objspace_t *objspace)
01237 {
01238 mark_stack_overflow = 0;
01239 mark_stack_ptr = mark_stack;
01240 }
01241
01242 #define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)
01243
01244 static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
01245 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev);
01246
01247 static void
01248 gc_mark_all(rb_objspace_t *objspace)
01249 {
01250 RVALUE *p, *pend;
01251 size_t i;
01252
01253 init_mark_stack(objspace);
01254 for (i = 0; i < heaps_used; i++) {
01255 p = heaps[i].slot; pend = p + heaps[i].limit;
01256 while (p < pend) {
01257 if ((p->as.basic.flags & FL_MARK) &&
01258 (p->as.basic.flags != FL_MARK)) {
01259 gc_mark_children(objspace, (VALUE)p, 0);
01260 }
01261 p++;
01262 }
01263 }
01264 }
01265
01266 static void
01267 gc_mark_rest(rb_objspace_t *objspace)
01268 {
01269 VALUE tmp_arry[MARK_STACK_MAX];
01270 VALUE *p;
01271
01272 p = (mark_stack_ptr - mark_stack) + tmp_arry;
01273 MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry);
01274
01275 init_mark_stack(objspace);
01276 while (p != tmp_arry) {
01277 p--;
01278 gc_mark_children(objspace, *p, 0);
01279 }
01280 }
01281
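/* A conservative pointer test: `ptr` counts as an object reference only if
 * it lies inside [lomem, himem), is RVALUE-aligned, and a binary search
 * finds a slab whose cell range contains it. */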
01282 static inline int
01283 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
01284 {
01285 register RVALUE *p = RANY(ptr);
01286 register struct heaps_slot *heap;
01287 register size_t hi, lo, mid;
01288
01289 if (p < lomem || p > himem) return FALSE;
01290 if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
01291
01292
01293 lo = 0;
01294 hi = heaps_used;
01295 while (lo < hi) {
01296 mid = (lo + hi) / 2;
01297 heap = &heaps[mid];
01298 if (heap->slot <= p) {
01299 if (p < heap->slot + heap->limit)
01300 return TRUE;
01301 lo = mid + 1;
01302 }
01303 else {
01304 hi = mid;
01305 }
01306 }
01307 return FALSE;
01308 }
01309
01310 static void
01311 mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
01312 {
01313 VALUE v;
01314 while (n--) {
01315 v = *x;
01316 VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
01317 if (is_pointer_to_heap(objspace, (void *)v)) {
01318 gc_mark(objspace, v, 0);
01319 }
01320 x++;
01321 }
01322 }
01323
01324 static void
01325 gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
01326 {
01327 long n;
01328
01329 if (end <= start) return;
01330 n = end - start;
01331 mark_locations_array(objspace, start, n);
01332 }
01333
01334 void
01335 rb_gc_mark_locations(VALUE *start, VALUE *end)
01336 {
01337 gc_mark_locations(&rb_objspace, start, end);
01338 }
01339
01340 #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, start, end)
01341
01342 struct mark_tbl_arg {
01343 rb_objspace_t *objspace;
01344 int lev;
01345 };
01346
01347 static int
01348 mark_entry(ID key, VALUE value, st_data_t data)
01349 {
01350 struct mark_tbl_arg *arg = (void*)data;
01351 gc_mark(arg->objspace, value, arg->lev);
01352 return ST_CONTINUE;
01353 }
01354
01355 static void
01356 mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
01357 {
01358 struct mark_tbl_arg arg;
01359 if (!tbl) return;
01360 arg.objspace = objspace;
01361 arg.lev = lev;
01362 st_foreach(tbl, mark_entry, (st_data_t)&arg);
01363 }
01364
01365 static int
01366 mark_key(VALUE key, VALUE value, st_data_t data)
01367 {
01368 struct mark_tbl_arg *arg = (void*)data;
01369 gc_mark(arg->objspace, key, arg->lev);
01370 return ST_CONTINUE;
01371 }
01372
01373 static void
01374 mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
01375 {
01376 struct mark_tbl_arg arg;
01377 if (!tbl) return;
01378 arg.objspace = objspace;
01379 arg.lev = lev;
01380 st_foreach(tbl, mark_key, (st_data_t)&arg);
01381 }
01382
01383 void
01384 rb_mark_set(st_table *tbl)
01385 {
01386 mark_set(&rb_objspace, tbl, 0);
01387 }
01388
01389 static int
01390 mark_keyvalue(VALUE key, VALUE value, st_data_t data)
01391 {
01392 struct mark_tbl_arg *arg = (void*)data;
01393 gc_mark(arg->objspace, key, arg->lev);
01394 gc_mark(arg->objspace, value, arg->lev);
01395 return ST_CONTINUE;
01396 }
01397
01398 static void
01399 mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
01400 {
01401 struct mark_tbl_arg arg;
01402 if (!tbl) return;
01403 arg.objspace = objspace;
01404 arg.lev = lev;
01405 st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
01406 }
01407
01408 void
01409 rb_mark_hash(st_table *tbl)
01410 {
01411 mark_hash(&rb_objspace, tbl, 0);
01412 }
01413
01414 static void
01415 mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me, int lev)
01416 {
01417 const rb_method_definition_t *def = me->def;
01418
01419 gc_mark(objspace, me->klass, lev);
01420 if (!def) return;
01421 switch (def->type) {
01422 case VM_METHOD_TYPE_ISEQ:
01423 gc_mark(objspace, def->body.iseq->self, lev);
01424 break;
01425 case VM_METHOD_TYPE_BMETHOD:
01426 gc_mark(objspace, def->body.proc, lev);
01427 break;
01428 case VM_METHOD_TYPE_ATTRSET:
01429 case VM_METHOD_TYPE_IVAR:
01430 gc_mark(objspace, def->body.attr.location, lev);
01431 break;
01432 default:
01433 break;
01434 }
01435 }
01436
01437 void
01438 rb_mark_method_entry(const rb_method_entry_t *me)
01439 {
01440 mark_method_entry(&rb_objspace, me, 0);
01441 }
01442
01443 static int
01444 mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
01445 {
01446 struct mark_tbl_arg *arg = (void*)data;
01447 mark_method_entry(arg->objspace, me, arg->lev);
01448 return ST_CONTINUE;
01449 }
01450
01451 static void
01452 mark_m_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
01453 {
01454 struct mark_tbl_arg arg;
01455 if (!tbl) return;
01456 arg.objspace = objspace;
01457 arg.lev = lev;
01458 st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
01459 }
01460
01461 static int
01462 free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
01463 {
01464 rb_free_method_entry(me);
01465 return ST_CONTINUE;
01466 }
01467
01468 void
01469 rb_free_m_table(st_table *tbl)
01470 {
01471 st_foreach(tbl, free_method_entry_i, 0);
01472 st_free_table(tbl);
01473 }
01474
01475 void
01476 rb_mark_tbl(st_table *tbl)
01477 {
01478 mark_tbl(&rb_objspace, tbl, 0);
01479 }
01480
01481 void
01482 rb_gc_mark_maybe(VALUE obj)
01483 {
01484 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
01485 gc_mark(&rb_objspace, obj, 0);
01486 }
01487 }
01488
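/* Marking recurses at most GC_LEVEL_MAX levels deep; deeper children are
 * parked on mark_stack and finished later, and if even that overflows, the
 * whole heap is rescanned by gc_mark_all(). */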
01489 #define GC_LEVEL_MAX 250
01490
01491 static void
01492 gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev)
01493 {
01494 register RVALUE *obj;
01495
01496 obj = RANY(ptr);
01497 if (rb_special_const_p(ptr)) return;
01498 if (obj->as.basic.flags == 0) return;
01499 if (obj->as.basic.flags & FL_MARK) return;
01500 obj->as.basic.flags |= FL_MARK;
01501
01502 if (lev > GC_LEVEL_MAX || (lev == 0 && stack_check())) {
01503 if (!mark_stack_overflow) {
01504 if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
01505 *mark_stack_ptr = ptr;
01506 mark_stack_ptr++;
01507 }
01508 else {
01509 mark_stack_overflow = 1;
01510 }
01511 }
01512 return;
01513 }
01514 gc_mark_children(objspace, ptr, lev+1);
01515 }
01516
01517 void
01518 rb_gc_mark(VALUE ptr)
01519 {
01520 gc_mark(&rb_objspace, ptr, 0);
01521 }
01522
01523 static void
01524 gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
01525 {
01526 register RVALUE *obj = RANY(ptr);
01527
01528 goto marking;
01529
01530 again:
01531 obj = RANY(ptr);
01532 if (rb_special_const_p(ptr)) return;
01533 if (obj->as.basic.flags == 0) return;
01534 if (obj->as.basic.flags & FL_MARK) return;
01535 obj->as.basic.flags |= FL_MARK;
01536
01537 marking:
01538 if (FL_TEST(obj, FL_EXIVAR)) {
01539 rb_mark_generic_ivar(ptr);
01540 }
01541
01542 switch (BUILTIN_TYPE(obj)) {
01543 case T_NIL:
01544 case T_FIXNUM:
01545 rb_bug("rb_gc_mark() called for broken object");
01546 break;
01547
01548 case T_NODE:
01549 switch (nd_type(obj)) {
01550 case NODE_IF:
01551 case NODE_FOR:
01552 case NODE_ITER:
01553 case NODE_WHEN:
01554 case NODE_MASGN:
01555 case NODE_RESCUE:
01556 case NODE_RESBODY:
01557 case NODE_CLASS:
01558 case NODE_BLOCK_PASS:
01559 gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
01560
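/* fall through */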
01561 case NODE_BLOCK:
01562 case NODE_OPTBLOCK:
01563 case NODE_ARRAY:
01564 case NODE_DSTR:
01565 case NODE_DXSTR:
01566 case NODE_DREGX:
01567 case NODE_DREGX_ONCE:
01568 case NODE_ENSURE:
01569 case NODE_CALL:
01570 case NODE_DEFS:
01571 case NODE_OP_ASGN1:
01572 case NODE_ARGS:
01573 gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
01574
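/* fall through */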
01575 case NODE_SUPER:
01576 case NODE_FCALL:
01577 case NODE_DEFN:
01578 case NODE_ARGS_AUX:
01579 ptr = (VALUE)obj->as.node.u3.node;
01580 goto again;
01581
01582 case NODE_WHILE:
01583 case NODE_UNTIL:
01584 case NODE_AND:
01585 case NODE_OR:
01586 case NODE_CASE:
01587 case NODE_SCLASS:
01588 case NODE_DOT2:
01589 case NODE_DOT3:
01590 case NODE_FLIP2:
01591 case NODE_FLIP3:
01592 case NODE_MATCH2:
01593 case NODE_MATCH3:
01594 case NODE_OP_ASGN_OR:
01595 case NODE_OP_ASGN_AND:
01596 case NODE_MODULE:
01597 case NODE_ALIAS:
01598 case NODE_VALIAS:
01599 case NODE_ARGSCAT:
01600 gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
01601
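/* fall through */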
01602 case NODE_GASGN:
01603 case NODE_LASGN:
01604 case NODE_DASGN:
01605 case NODE_DASGN_CURR:
01606 case NODE_IASGN:
01607 case NODE_IASGN2:
01608 case NODE_CVASGN:
01609 case NODE_COLON3:
01610 case NODE_OPT_N:
01611 case NODE_EVSTR:
01612 case NODE_UNDEF:
01613 case NODE_POSTEXE:
01614 ptr = (VALUE)obj->as.node.u2.node;
01615 goto again;
01616
01617 case NODE_HASH:
01618 case NODE_LIT:
01619 case NODE_STR:
01620 case NODE_XSTR:
01621 case NODE_DEFINED:
01622 case NODE_MATCH:
01623 case NODE_RETURN:
01624 case NODE_BREAK:
01625 case NODE_NEXT:
01626 case NODE_YIELD:
01627 case NODE_COLON2:
01628 case NODE_SPLAT:
01629 case NODE_TO_ARY:
01630 ptr = (VALUE)obj->as.node.u1.node;
01631 goto again;
01632
01633 case NODE_SCOPE:
01634 case NODE_CDECL:
01635 case NODE_OPT_ARG:
01636 gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
01637 ptr = (VALUE)obj->as.node.u2.node;
01638 goto again;
01639
01640 case NODE_ZARRAY:
01641 case NODE_ZSUPER:
01642 case NODE_VCALL:
01643 case NODE_GVAR:
01644 case NODE_LVAR:
01645 case NODE_DVAR:
01646 case NODE_IVAR:
01647 case NODE_CVAR:
01648 case NODE_NTH_REF:
01649 case NODE_BACK_REF:
01650 case NODE_REDO:
01651 case NODE_RETRY:
01652 case NODE_SELF:
01653 case NODE_NIL:
01654 case NODE_TRUE:
01655 case NODE_FALSE:
01656 case NODE_ERRINFO:
01657 case NODE_BLOCK_ARG:
01658 break;
01659 case NODE_ALLOCA:
01660 mark_locations_array(objspace,
01661 (VALUE*)obj->as.node.u1.value,
01662 obj->as.node.u3.cnt);
01663 ptr = (VALUE)obj->as.node.u2.node;
01664 goto again;
01665
01666 default:
01667 if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
01668 gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
01669 }
01670 if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
01671 gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
01672 }
01673 if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
01674 gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
01675 }
01676 }
01677 return;
01678 }
01679
01680 gc_mark(objspace, obj->as.basic.klass, lev);
01681 switch (BUILTIN_TYPE(obj)) {
01682 case T_ICLASS:
01683 case T_CLASS:
01684 case T_MODULE:
01685 mark_m_tbl(objspace, RCLASS_M_TBL(obj), lev);
01686 mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
01687 ptr = RCLASS_SUPER(obj);
01688 goto again;
01689
01690 case T_ARRAY:
01691 if (FL_TEST(obj, ELTS_SHARED)) {
01692 ptr = obj->as.array.as.heap.aux.shared;
01693 goto again;
01694 }
01695 else {
01696 long i, len = RARRAY_LEN(obj);
01697 VALUE *ptr = RARRAY_PTR(obj);
01698 for (i=0; i < len; i++) {
01699 gc_mark(objspace, *ptr++, lev);
01700 }
01701 }
01702 break;
01703
01704 case T_HASH:
01705 mark_hash(objspace, obj->as.hash.ntbl, lev);
01706 ptr = obj->as.hash.ifnone;
01707 goto again;
01708
01709 case T_STRING:
01710 #define STR_ASSOC FL_USER3
01711 if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
01712 ptr = obj->as.string.as.heap.aux.shared;
01713 goto again;
01714 }
01715 break;
01716
01717 case T_DATA:
01718 if (RTYPEDDATA_P(obj)) {
01719 if (obj->as.typeddata.type->dmark) (*obj->as.typeddata.type->dmark)(DATA_PTR(obj));
01720 }
01721 else {
01722 if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
01723 }
01724 break;
01725
01726 case T_OBJECT:
01727 {
01728 long i, len = ROBJECT_NUMIV(obj);
01729 VALUE *ptr = ROBJECT_IVPTR(obj);
01730 for (i = 0; i < len; i++) {
01731 gc_mark(objspace, *ptr++, lev);
01732 }
01733 }
01734 break;
01735
01736 case T_FILE:
01737 if (obj->as.file.fptr) {
01738 gc_mark(objspace, obj->as.file.fptr->pathv, lev);
01739 gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev);
01740 gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat, lev);
01741 gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts, lev);
01742 gc_mark(objspace, obj->as.file.fptr->encs.ecopts, lev);
01743 gc_mark(objspace, obj->as.file.fptr->write_lock, lev);
01744 }
01745 break;
01746
01747 case T_REGEXP:
01748 gc_mark(objspace, obj->as.regexp.src, lev);
01749 break;
01750
01751 case T_FLOAT:
01752 case T_BIGNUM:
01753 case T_ZOMBIE:
01754 break;
01755
01756 case T_MATCH:
01757 gc_mark(objspace, obj->as.match.regexp, lev);
01758 if (obj->as.match.str) {
01759 ptr = obj->as.match.str;
01760 goto again;
01761 }
01762 break;
01763
01764 case T_RATIONAL:
01765 gc_mark(objspace, obj->as.rational.num, lev);
01766 gc_mark(objspace, obj->as.rational.den, lev);
01767 break;
01768
01769 case T_COMPLEX:
01770 gc_mark(objspace, obj->as.complex.real, lev);
01771 gc_mark(objspace, obj->as.complex.imag, lev);
01772 break;
01773
01774 case T_STRUCT:
01775 {
01776 long len = RSTRUCT_LEN(obj);
01777 VALUE *ptr = RSTRUCT_PTR(obj);
01778
01779 while (len--) {
01780 gc_mark(objspace, *ptr++, lev);
01781 }
01782 }
01783 break;
01784
01785 default:
01786 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
01787 BUILTIN_TYPE(obj), (void *)obj,
01788 is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
01789 }
01790 }
01791
01792 static int obj_free(rb_objspace_t *, VALUE);
01793
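/* Return a swept cell to the freelist, marking its memory undefined for
 * valgrind first. */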
01794 static inline void
01795 add_freelist(rb_objspace_t *objspace, RVALUE *p)
01796 {
01797 VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
01798 p->as.free.flags = 0;
01799 p->as.free.next = freelist;
01800 freelist = p;
01801 }
01802
01803 static void
01804 finalize_list(rb_objspace_t *objspace, RVALUE *p)
01805 {
01806 while (p) {
01807 RVALUE *tmp = p->as.free.next;
01808 run_final(objspace, (VALUE)p);
01809 if (!FL_TEST(p, FL_SINGLETON)) {
01810 add_freelist(objspace, p);
01811 }
01812 else {
01813 struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
01814 slot->limit--;
01815 }
01816 p = tmp;
01817 }
01818 }
01819
01820 static void
01821 free_unused_heaps(rb_objspace_t *objspace)
01822 {
01823 size_t i, j;
01824 RVALUE *last = 0;
01825
01826 for (i = j = 1; j < heaps_used; i++) {
01827 if (heaps[i].limit == 0) {
01828 if (!last) {
01829 last = heaps[i].membase;
01830 }
01831 else {
01832 free(heaps[i].membase);
01833 }
01834 heaps_used--;
01835 }
01836 else {
01837 if (i != j) {
01838 heaps[j] = heaps[i];
01839 }
01840 j++;
01841 }
01842 }
01843 if (last) {
01844 if (last < heaps_freed) {
01845 free(heaps_freed);
01846 heaps_freed = last;
01847 }
01848 else {
01849 free(last);
01850 }
01851 }
01852 }
01853
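/* Sweep phase: rebuild the freelist from unmarked cells, queue objects that
 * need finalization on final_list, retune malloc_limit, and grow the heap
 * when too few cells were freed. */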
01854 static void
01855 gc_sweep(rb_objspace_t *objspace)
01856 {
01857 RVALUE *p, *pend, *final_list;
01858 size_t freed = 0;
01859 size_t i;
01860 size_t live = 0, free_min = 0, do_heap_free = 0;
01861
01862 do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
01863 free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
01864
01865 if (free_min < FREE_MIN) {
01866 do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
01867 free_min = FREE_MIN;
01868 }
01869
01870 freelist = 0;
01871 final_list = deferred_final_list;
01872 deferred_final_list = 0;
01873 for (i = 0; i < heaps_used; i++) {
01874 size_t free_num = 0, final_num = 0;
01875 RVALUE *free = freelist;
01876 RVALUE *final = final_list;
01877 int deferred;
01878
if (heaps[i].finalize_flag) continue;
01880
01881 p = heaps[i].slot; pend = p + heaps[i].limit;
01882 while (p < pend) {
01883 if (!(p->as.basic.flags & FL_MARK)) {
01884 if (p->as.basic.flags &&
01885 ((deferred = obj_free(objspace, (VALUE)p)) ||
01886 ((FL_TEST(p, FL_FINALIZE)) && need_call_final))) {
01887 if (!deferred) {
01888 p->as.free.flags = T_ZOMBIE;
01889 RDATA(p)->dfree = 0;
01890 }
01891 p->as.free.flags |= FL_MARK;
01892 p->as.free.next = final_list;
01893 final_list = p;
01894 final_num++;
01895 }
01896 else {
01897 add_freelist(objspace, p);
01898 free_num++;
01899 }
01900 }
01901 else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
/* an already-deferred zombie: leave it marked until the finalizers run */
01904 }
01905 else {
01906 RBASIC(p)->flags &= ~FL_MARK;
01907 live++;
01908 }
01909 p++;
01910 }
01911 if (final_num + free_num == heaps[i].limit && freed > do_heap_free) {
01912 RVALUE *pp;
01913
01914 for (pp = final_list; pp != final; pp = pp->as.free.next) {
01915 RDATA(pp)->dmark = (void (*)())(VALUE)&heaps[i];
01916 pp->as.free.flags |= FL_SINGLETON;
01917 }
01918 heaps[i].limit = final_num;
01919 heaps[i].finalize_flag = TRUE;
01920 freelist = free;
01921 }
01922 else {
01923 freed += free_num;
01924 }
01925 }
01926 GC_PROF_SET_MALLOC_INFO;
01927 if (malloc_increase > malloc_limit) {
01928 malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)live / (live + freed));
01929 if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
01930 }
01931 malloc_increase = 0;
01932 if (freed < free_min) {
01933 set_heaps_increment(objspace);
01934 heaps_increment(objspace);
01935 }
01936 during_gc = 0;
01937
01938
01939 if (final_list) {
01940 GC_PROF_SET_HEAP_INFO;
01941 deferred_final_list = final_list;
01942 RUBY_VM_SET_FINALIZER_INTERRUPT(GET_THREAD());
01943 }
else {
01945 free_unused_heaps(objspace);
01946 GC_PROF_SET_HEAP_INFO;
01947 }
01948 }
01949
01950 void
01951 rb_gc_force_recycle(VALUE p)
01952 {
01953 rb_objspace_t *objspace = &rb_objspace;
01954 add_freelist(objspace, (RVALUE *)p);
01955 }
01956
01957 static inline void
01958 make_deferred(RVALUE *p)
01959 {
01960 p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
01961 }
01962
01963 static inline void
01964 make_io_deferred(RVALUE *p)
01965 {
01966 rb_io_t *fptr = p->as.file.fptr;
01967 make_deferred(p);
01968 p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
01969 p->as.data.data = fptr;
01970 }
01971
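/* Release the resources owned by `obj`.  Returns 1 when the actual freeing
 * must be deferred to the finalizer phase (T_DATA with a free function,
 * T_FILE), 0 otherwise. */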
01972 static int
01973 obj_free(rb_objspace_t *objspace, VALUE obj)
01974 {
01975 switch (BUILTIN_TYPE(obj)) {
01976 case T_NIL:
01977 case T_FIXNUM:
01978 case T_TRUE:
01979 case T_FALSE:
01980 rb_bug("obj_free() called for broken object");
01981 break;
01982 }
01983
01984 if (FL_TEST(obj, FL_EXIVAR)) {
01985 rb_free_generic_ivar((VALUE)obj);
01986 FL_UNSET(obj, FL_EXIVAR);
01987 }
01988
01989 switch (BUILTIN_TYPE(obj)) {
01990 case T_OBJECT:
01991 if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
01992 RANY(obj)->as.object.as.heap.ivptr) {
01993 xfree(RANY(obj)->as.object.as.heap.ivptr);
01994 }
01995 break;
01996 case T_MODULE:
01997 case T_CLASS:
01998 rb_clear_cache_by_class((VALUE)obj);
01999 rb_free_m_table(RCLASS_M_TBL(obj));
02000 if (RCLASS_IV_TBL(obj)) {
02001 st_free_table(RCLASS_IV_TBL(obj));
02002 }
02003 if (RCLASS_IV_INDEX_TBL(obj)) {
02004 st_free_table(RCLASS_IV_INDEX_TBL(obj));
02005 }
02006 xfree(RANY(obj)->as.klass.ptr);
02007 break;
02008 case T_STRING:
02009 rb_str_free(obj);
02010 break;
02011 case T_ARRAY:
02012 rb_ary_free(obj);
02013 break;
02014 case T_HASH:
02015 if (RANY(obj)->as.hash.ntbl) {
02016 st_free_table(RANY(obj)->as.hash.ntbl);
02017 }
02018 break;
02019 case T_REGEXP:
02020 if (RANY(obj)->as.regexp.ptr) {
02021 onig_free(RANY(obj)->as.regexp.ptr);
02022 }
02023 break;
02024 case T_DATA:
02025 if (DATA_PTR(obj)) {
02026 if (RTYPEDDATA_P(obj)) {
02027 RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->dfree;
02028 }
02029 if ((long)RANY(obj)->as.data.dfree == -1) {
02030 xfree(DATA_PTR(obj));
02031 }
02032 else if (RANY(obj)->as.data.dfree) {
02033 make_deferred(RANY(obj));
02034 return 1;
02035 }
02036 }
02037 break;
02038 case T_MATCH:
02039 if (RANY(obj)->as.match.rmatch) {
02040 struct rmatch *rm = RANY(obj)->as.match.rmatch;
02041 onig_region_free(&rm->regs, 0);
02042 if (rm->char_offset)
02043 xfree(rm->char_offset);
02044 xfree(rm);
02045 }
02046 break;
02047 case T_FILE:
02048 if (RANY(obj)->as.file.fptr) {
02049 make_io_deferred(RANY(obj));
02050 return 1;
02051 }
02052 break;
02053 case T_RATIONAL:
02054 case T_COMPLEX:
02055 break;
02056 case T_ICLASS:
/* an iClass shares its method and ivar tables with the module itself */
02058 xfree(RANY(obj)->as.klass.ptr);
02059 break;
02060
02061 case T_FLOAT:
02062 break;
02063
02064 case T_BIGNUM:
02065 if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
02066 xfree(RBIGNUM_DIGITS(obj));
02067 }
02068 break;
02069 case T_NODE:
02070 switch (nd_type(obj)) {
02071 case NODE_SCOPE:
02072 if (RANY(obj)->as.node.u1.tbl) {
02073 xfree(RANY(obj)->as.node.u1.tbl);
02074 }
02075 break;
02076 case NODE_ALLOCA:
02077 xfree(RANY(obj)->as.node.u1.node);
02078 break;
02079 }
02080 break;
02081
02082 case T_STRUCT:
02083 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
02084 RANY(obj)->as.rstruct.as.heap.ptr) {
02085 xfree(RANY(obj)->as.rstruct.as.heap.ptr);
02086 }
02087 break;
02088
02089 default:
02090 rb_bug("gc_sweep(): unknown data type 0x%x(%p)",
02091 BUILTIN_TYPE(obj), (void*)obj);
02092 }
02093
02094 return 0;
02095 }
02096
02097 #define GC_NOTIFY 0
02098
02099 void rb_vm_mark(void *ptr);
02100
02101 #if STACK_GROW_DIRECTION < 0
02102 #define GET_STACK_BOUNDS(start, end, appendix) (start = STACK_END, end = STACK_START)
02103 #elif STACK_GROW_DIRECTION > 0
02104 #define GET_STACK_BOUNDS(start, end, appendix) (start = STACK_START, end = STACK_END+appendix)
02105 #else
02106 #define GET_STACK_BOUNDS(start, end, appendix) \
02107 ((STACK_END < STACK_START) ? \
02108 (start = STACK_END, end = STACK_START) : (start = STACK_START, end = STACK_END+appendix))
02109 #endif
02110
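/* Spill the machine registers into a jmp_buf and mark both it and the
 * current C stack conservatively. */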
02111 static void
02112 mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
02113 {
02114 rb_jmp_buf save_regs_gc_mark;
02115 VALUE *stack_start, *stack_end;
02116
02117 FLUSH_REGISTER_WINDOWS;
02118
02119 rb_setjmp(save_regs_gc_mark);
02120
02121 SET_STACK_END;
02122 GET_STACK_BOUNDS(stack_start, stack_end, 1);
02123
02124 mark_locations_array(objspace,
02125 (VALUE*)save_regs_gc_mark,
02126 sizeof(save_regs_gc_mark) / sizeof(VALUE));
02127
02128 rb_gc_mark_locations(stack_start, stack_end);
02129 #ifdef __ia64
02130 rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
02131 #endif
02132 #if defined(__mc68000__)
02133 mark_locations_array((VALUE*)((char*)STACK_END + 2),
02134 (STACK_START - STACK_END));
02135 #endif
02136 }
02137
02138 void rb_gc_mark_encodings(void);
02139
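/* The full collection: mark from the VM, the machine context, finalizers,
 * threads, symbols, encodings, registered globals and parser state, drain
 * the deferred mark stack, then sweep. */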
02140 static int
02141 garbage_collect(rb_objspace_t *objspace)
02142 {
02143 struct gc_list *list;
02144 rb_thread_t *th = GET_THREAD();
02145 INIT_GC_PROF_PARAMS;
02146
02147 if (GC_NOTIFY) printf("start garbage_collect()\n");
02148
02149 if (!heaps) {
02150 return FALSE;
02151 }
02152
02153 if (dont_gc || during_gc) {
02154 if (!freelist) {
02155 if (!heaps_increment(objspace)) {
02156 set_heaps_increment(objspace);
02157 heaps_increment(objspace);
02158 }
02159 }
02160 return TRUE;
02161 }
02162 during_gc++;
02163 objspace->count++;
02164
02165 GC_PROF_TIMER_START;
02166 GC_PROF_MARK_TIMER_START;
02167 SET_STACK_END;
02168
02169 init_mark_stack(objspace);
02170
02171 th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
02172
02173 if (finalizer_table) {
02174 mark_tbl(objspace, finalizer_table, 0);
02175 }
02176
02177 mark_current_machine_context(objspace, th);
02178
02179 rb_gc_mark_threads();
02180 rb_gc_mark_symbols();
02181 rb_gc_mark_encodings();
02182
/* mark protected global variables registered via rb_gc_register_address() */
02184 for (list = global_List; list; list = list->next) {
02185 rb_gc_mark_maybe(*list->varptr);
02186 }
02187 rb_mark_end_proc();
02188 rb_gc_mark_global_tbl();
02189
02190 mark_tbl(objspace, rb_class_tbl, 0);
02191
/* mark generic instance variables for special constants */
02193 rb_mark_generic_ivar_tbl();
02194
02195 rb_gc_mark_parser();
02196
/* finish marking objects whose traversal was deferred to the mark stack */
02198 while (!MARK_STACK_EMPTY) {
02199 if (mark_stack_overflow) {
02200 gc_mark_all(objspace);
02201 }
02202 else {
02203 gc_mark_rest(objspace);
02204 }
02205 }
02206 GC_PROF_MARK_TIMER_STOP;
02207
02208 GC_PROF_SWEEP_TIMER_START;
02209 gc_sweep(objspace);
02210 GC_PROF_SWEEP_TIMER_STOP;
02211
02212
02213 if (th->vm->unlinked_method_entry_list) {
02214 rb_sweep_method_entry(th->vm);
02215 }
02216
02217 GC_PROF_TIMER_STOP;
02218 if (GC_NOTIFY) printf("end garbage_collect()\n");
02219 return TRUE;
02220 }
02221
02222 int
02223 rb_garbage_collect(void)
02224 {
02225 return garbage_collect(&rb_objspace);
02226 }
02227
02228 void
02229 rb_gc_mark_machine_stack(rb_thread_t *th)
02230 {
02231 rb_objspace_t *objspace = &rb_objspace;
02232 VALUE *stack_start, *stack_end;
02233
02234 GET_STACK_BOUNDS(stack_start, stack_end, 0);
02235 rb_gc_mark_locations(stack_start, stack_end);
02236 #ifdef __ia64
02237 rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
02238 #endif
02239 }
/*
 *  call-seq:
 *     GC.start                     -> nil
 *     gc.garbage_collect           -> nil
 *     ObjectSpace.garbage_collect  -> nil
 *
 *  Initiates garbage collection, unless manually disabled.
 */
02252 VALUE
02253 rb_gc_start(void)
02254 {
02255 rb_gc();
02256 return Qnil;
02257 }
02258
02259 #undef Init_stack
02260
02261 void
02262 Init_stack(volatile VALUE *addr)
02263 {
02264 ruby_init_stack(addr);
02265 }
02266
02298 void
02299 Init_heap(void)
02300 {
02301 init_heap(&rb_objspace);
02302 }
02303
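/*
  rb_objspace_each_objects() walks the whole object space one heap slab at
  a time.  For every slab, `callback` is invoked with:

    vstart: pointer to the first live cell of the slab,
    vend:   pointer just past the slab's last valid cell,
    stride: distance between cells (sizeof(RVALUE)).

  A non-zero return from the callback stops the iteration.  Because GC may
  reallocate the slab directory while the callback runs, the loop below
  re-locates its position through membase on every pass.

  A minimal (hypothetical) caller, counting live slots:

      static int
      count_live_i(void *vstart, void *vend, size_t stride, void *data)
      {
          RVALUE *p = (RVALUE *)vstart;
          size_t *count = (size_t *)data;
          for (; p != (RVALUE *)vend; p = (RVALUE *)((char *)p + stride)) {
              if (p->as.basic.flags) (*count)++;   (flags == 0 means a free cell)
          }
          return 0;   (keep iterating)
      }
*/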
void
rb_objspace_each_objects(int (*callback)(void *vstart, void *vend,
                                         size_t stride, void *d),
                         void *data)
{
    size_t i;
    RVALUE *membase = 0;
    RVALUE *pstart, *pend;
    rb_objspace_t *objspace = &rb_objspace;
    volatile VALUE v;

    i = 0;
    while (i < heaps_used) {
        /* find the slab whose membase follows the one just visited */
        while (0 < i && (uintptr_t)membase < (uintptr_t)heaps[i-1].membase)
            i--;
        while (i < heaps_used && (uintptr_t)heaps[i].membase <= (uintptr_t)membase)
            i++;
        if (heaps_used <= i)
            break;
        membase = heaps[i].membase;

        pstart = heaps[i].slot;
        pend = pstart + heaps[i].limit;

        /* skip leading free slots; 'v' keeps the first live object
         * reachable from the machine stack while the callback runs */
        for (; pstart != pend; pstart++) {
            if (pstart->as.basic.flags) {
                v = (VALUE)pstart;
                break;
            }
        }
        if (pstart != pend) {
            if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
                return;
            }
        }
    }

    return;
}

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;
    RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
    volatile VALUE v;

    for (; p != pend; p++) {
        if (p->as.basic.flags) {
            switch (BUILTIN_TYPE(p)) {
              case T_NONE:
              case T_ICLASS:
              case T_NODE:
              case T_ZOMBIE:
                continue;
              case T_CLASS:
                if (FL_TEST(p, FL_SINGLETON))
                    continue;
                /* fall through */
              default:
                if (!p->as.basic.klass) continue;   /* hidden object */
                v = (VALUE)p;
                if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

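/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, non-immediate object in this
 *  Ruby process. If <i>module</i> is specified, the block is called
 *  only for objects that are instances of <i>module</i> or of one of
 *  its subclasses. Returns the number of objects yielded. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never yielded, nor are internal objects such as singleton classes.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *  For example (output varies by process):
 *
 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
 *     puts "Total count: #{count}"
 */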
static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    rb_secure(4);
    if (argc == 0) {
        of = 0;
    }
    else {
        rb_scan_args(argc, argv, "01", &of);
    }
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

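/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 */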
static VALUE
undefine_final(VALUE os, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (OBJ_FROZEN(obj)) rb_error_frozen("object");
    if (finalizer_table) {
        st_delete(finalizer_table, (st_data_t*)&obj, 0);
    }
    FL_UNSET(obj, FL_FINALIZE);
    return obj;
}

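/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i> was
 *  destroyed. The finalizer is passed the id of the finalized object,
 *  for example (illustrative):
 *
 *     ObjectSpace.define_finalizer(obj, proc {|id| puts "finalized #{id}" })
 */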
static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE obj, block, table;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (OBJ_FROZEN(obj)) rb_error_frozen("object");
    if (argc == 1) {
        block = rb_block_proc();
    }
    else if (!rb_respond_to(block, rb_intern("call"))) {
        rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
                 rb_obj_classname(block));
    }
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    RBASIC(obj)->flags |= FL_FINALIZE;

    /* store the finalizer as a frozen [safe_level, callable] pair */
    block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
    OBJ_FREEZE(block);

    if (!finalizer_table) {
        finalizer_table = st_init_numtable();
    }
    if (st_lookup(finalizer_table, obj, &table)) {
        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(1, block);
        RBASIC(table)->klass = 0;   /* hide the table from ObjectSpace */
        st_add_direct(finalizer_table, obj, table);
    }
    return block;
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;

    if (!finalizer_table) return;
    if (!FL_TEST(obj, FL_FINALIZE)) return;
    if (st_lookup(finalizer_table, obj, &table)) {
        st_insert(finalizer_table, dest, table);
    }
    FL_SET(dest, FL_FINALIZE);
}

static VALUE
run_single_final(VALUE arg)
{
    VALUE *args = (VALUE *)arg;
    rb_eval_cmd(args[0], args[1], (int)args[2]);
    return Qnil;
}

static void
run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE objid, VALUE table)
{
    long i;
    int status;
    VALUE args[3];

    args[1] = 0;
    args[2] = (VALUE)rb_safe_level();
    if (RARRAY_LEN(table) > 0) {
        args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
    }
    for (i = 0; i < RARRAY_LEN(table); i++) {
        VALUE final = RARRAY_PTR(table)[i];   /* [safe_level, callable] */
        args[0] = RARRAY_PTR(final)[1];
        args[2] = (VALUE)FIX2INT(RARRAY_PTR(final)[0]);
        /* rb_protect() so one failing finalizer cannot skip the rest */
        rb_protect(run_single_final, (VALUE)args, &status);
    }
}

static void
run_final(rb_objspace_t *objspace, VALUE obj)
{
    VALUE table, objid;
    RUBY_DATA_FUNC free_func = 0;

    objid = rb_obj_id(obj);     /* compute the id before tearing the object down */
    RBASIC(obj)->klass = 0;

    /* release the C-level payload first ... */
    if (RTYPEDDATA_P(obj)) {
        free_func = RTYPEDDATA_TYPE(obj)->dfree;
    }
    else {
        free_func = RDATA(obj)->dfree;
    }
    if (free_func) {
        (*free_func)(DATA_PTR(obj));
    }

    /* ... then run any Ruby-level finalizers registered for it */
    if (finalizer_table &&
        st_delete(finalizer_table, (st_data_t*)&obj, &table)) {
        run_finalizer(objspace, obj, objid, table);
    }
}

static void
finalize_deferred(rb_objspace_t *objspace)
{
    RVALUE *p = deferred_final_list;
    deferred_final_list = 0;

    if (p) {
        finalize_list(objspace, p);
    }
}

static void
gc_finalize_deferred(rb_objspace_t *objspace)
{
    finalize_deferred(objspace);
    free_unused_heaps(objspace);
}

void
rb_gc_finalize_deferred(void)
{
    gc_finalize_deferred(&rb_objspace);
}

static int
chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg)
{
    RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg;
    /* an object with FL_FINALIZE but no FL_MARK is dead: turn it into
     * a zombie and chain it onto the deferred final list */
    if ((p->as.basic.flags & (FL_FINALIZE|FL_MARK)) == FL_FINALIZE) {
        if (BUILTIN_TYPE(p) != T_ZOMBIE) {
            p->as.free.flags = FL_MARK | T_ZOMBIE;
            RDATA(p)->dfree = 0;
        }
        p->as.free.next = *final_list;
        *final_list = p;
    }
    return ST_CONTINUE;
}

struct force_finalize_list {
    VALUE obj;
    VALUE table;
    struct force_finalize_list *next;
};

static int
force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
{
    struct force_finalize_list **prev = (struct force_finalize_list **)arg;
    struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
    curr->obj = key;
    curr->table = val;
    curr->next = *prev;
    *prev = curr;
    return ST_CONTINUE;
}

void
rb_gc_call_finalizer_at_exit(void)
{
    rb_objspace_call_finalizer(&rb_objspace);
}

void
rb_objspace_call_finalizer(rb_objspace_t *objspace)
{
    RVALUE *p, *pend;
    RVALUE *final_list = 0;
    size_t i;

    /* run deferred finalizers; running them may expose further dead
     * objects with finalizers, so repeat until the list stays empty */
    if (finalizer_table) {
        do {
            finalize_deferred(objspace);
            mark_tbl(objspace, finalizer_table, 0);
            st_foreach(finalizer_table, chain_finalized_object,
                       (st_data_t)&deferred_final_list);
        } while (deferred_final_list);

        /* force-run every finalizer that is still registered */
        while (finalizer_table->num_entries) {
            struct force_finalize_list *list = 0;
            st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
            while (list) {
                struct force_finalize_list *curr = list;
                run_finalizer(objspace, curr->obj, rb_obj_id(curr->obj), curr->table);
                st_delete(finalizer_table, (st_data_t*)&curr->obj, 0);
                list = curr->next;
                xfree(curr);
            }
        }
        st_free_table(finalizer_table);
        finalizer_table = 0;
    }

    /* finalizers are part of garbage collection */
    during_gc++;

    /* run clean-up functions of remaining T_DATA and T_FILE objects */
    for (i = 0; i < heaps_used; i++) {
        p = heaps[i].slot; pend = p + heaps[i].limit;
        while (p < pend) {
            if (BUILTIN_TYPE(p) == T_DATA &&
                DATA_PTR(p) && RANY(p)->as.data.dfree &&
                RANY(p)->as.basic.klass != rb_cThread && RANY(p)->as.basic.klass != rb_cMutex) {
                p->as.free.flags = 0;
                if (RTYPEDDATA_P(p)) {
                    RDATA(p)->dfree = RANY(p)->as.typeddata.type->dfree;
                }
                if ((long)RANY(p)->as.data.dfree == -1) {
                    xfree(DATA_PTR(p));
                }
                else if (RANY(p)->as.data.dfree) {
                    make_deferred(RANY(p));
                    RANY(p)->as.free.next = final_list;
                    final_list = p;
                }
            }
            else if (BUILTIN_TYPE(p) == T_FILE) {
                if (RANY(p)->as.file.fptr) {
                    make_io_deferred(RANY(p));
                    RANY(p)->as.free.next = final_list;
                    final_list = p;
                }
            }
            p++;
        }
    }
    during_gc = 0;
    if (final_list) {
        finalize_list(objspace, final_list);
    }
}

void
rb_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    garbage_collect(objspace);
    gc_finalize_deferred(objspace);
}

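/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
 *     r == s                                 #=> true
 */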
static VALUE
id2ref(VALUE obj, VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    rb_objspace_t *objspace = &rb_objspace;
    VALUE ptr;
    void *p0;

    rb_secure(4);
    ptr = NUM2PTR(objid);
    p0 = (void *)ptr;

    if (ptr == Qtrue) return Qtrue;
    if (ptr == Qfalse) return Qfalse;
    if (ptr == Qnil) return Qnil;
    if (FIXNUM_P(ptr)) return (VALUE)ptr;
    ptr = objid ^ FIXNUM_FLAG;  /* unset FIXNUM_FLAG */

    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {   /* id of a symbol; see rb_obj_id() */
        ID symid = ptr / sizeof(RVALUE);
        if (rb_id2name(symid) == 0)
            rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
        return ID2SYM(symid);
    }

    if (!is_pointer_to_heap(objspace, (void *)ptr) ||
        BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
        rb_raise(rb_eRangeError, "%p is not id value", p0);
    }
    if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
        rb_raise(rb_eRangeError, "%p is recycled object", p0);
    }
    return (VALUE)ptr;
}

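/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       -> fixnum
 *     obj.object_id    -> fixnum
 *
 *  Returns an integer identifier for <i>obj</i>. The same number is
 *  returned on every call to object_id for a given object, and no two
 *  active objects share an id. Note, however, that an id may be
 *  recycled after its object has been garbage collected.
 */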
VALUE
rb_obj_id(VALUE obj)
{
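    /*
     * The object id space is laid out so that ids of heap objects,
     * symbols, and special constants can never collide. On a 32-bit
     * VALUE (the 64-bit case is analogous):
     *
     *   false   ...00000000  (0)
     *   true    ...00000010  (2)
     *   nil     ...00000100  (4)
     *   undef   ...00000110  (6)
     *   fixnum  fffffffff1   (the tagged value: n shifted left once,
     *                         low bit set)
     *   object  oooooooo00   the RVALUE address, a multiple of
     *                         sizeof(RVALUE) by slot alignment
     *   symbol  SYM2ID(obj) * sizeof(RVALUE) + (4 << 2), never a
     *                         multiple of sizeof(RVALUE)
     *
     * Symbol and object ids are then tagged with FIXNUM_FLAG so they
     * read back as Fixnums at the Ruby level; id2ref() above strips
     * the flag and uses "ptr % sizeof(RVALUE) == (4 << 2)" to tell
     * symbol ids from object addresses. sizeof(RVALUE) is 20 or 24 on
     * 32-bit builds (depending on double alignment) and 40 on 64-bit
     * builds, so the (4 << 2) == 16 residue is always distinct.
     */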
    if (TYPE(obj) == T_SYMBOL) {
        return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
    }
    if (SPECIAL_CONST_P(obj)) {
        return LONG2NUM((SIGNED_VALUE)obj);
    }
    return (VALUE)((SIGNED_VALUE)obj | FIXNUM_FLAG);
}

static int
set_zero(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE k = (VALUE)key;
    VALUE hash = (VALUE)arg;
    rb_hash_aset(hash, k, INT2FIX(0));
    return ST_CONTINUE;
}

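/*
 *  call-seq:
 *     ObjectSpace.count_objects([result_hash]) -> hash
 *
 *  Counts the live and free heap slots for each object type and
 *  returns a hash such as (figures illustrative):
 *
 *     {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...}
 *
 *  If the optional result_hash argument is given, it is cleared and
 *  reused, which avoids allocating a new hash during the count. The
 *  exact set of keys is implementation defined and this method is
 *  only expected to work on C Ruby.
 */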
static VALUE
count_objects(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t counts[T_MASK+1];
    size_t freed = 0;
    size_t total = 0;
    size_t i;
    VALUE hash;

    if (rb_scan_args(argc, argv, "01", &hash) == 1) {
        if (TYPE(hash) != T_HASH)
            rb_raise(rb_eTypeError, "non-hash given");
    }

    for (i = 0; i <= T_MASK; i++) {
        counts[i] = 0;
    }

    /* tally every slot in every heap slab by its builtin type */
    for (i = 0; i < heaps_used; i++) {
        RVALUE *p, *pend;

        p = heaps[i].slot; pend = p + heaps[i].limit;
        for (; p < pend; p++) {
            if (p->as.basic.flags) {
                counts[BUILTIN_TYPE(p)]++;
            }
            else {
                freed++;
            }
        }
        total += heaps[i].limit;
    }

    if (hash == Qnil) {
        hash = rb_hash_new();
    }
    else if (!RHASH_EMPTY_P(hash)) {
        /* reset the caller-supplied hash so stale keys read as zero */
        st_foreach(RHASH_TBL(hash), set_zero, hash);
    }
    rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
    rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));

    for (i = 0; i <= T_MASK; i++) {
        VALUE type;
        switch (i) {
#define COUNT_TYPE(t) case t: type = ID2SYM(rb_intern(#t)); break;
            COUNT_TYPE(T_NONE);
            COUNT_TYPE(T_OBJECT);
            COUNT_TYPE(T_CLASS);
            COUNT_TYPE(T_MODULE);
            COUNT_TYPE(T_FLOAT);
            COUNT_TYPE(T_STRING);
            COUNT_TYPE(T_REGEXP);
            COUNT_TYPE(T_ARRAY);
            COUNT_TYPE(T_HASH);
            COUNT_TYPE(T_STRUCT);
            COUNT_TYPE(T_BIGNUM);
            COUNT_TYPE(T_FILE);
            COUNT_TYPE(T_DATA);
            COUNT_TYPE(T_MATCH);
            COUNT_TYPE(T_COMPLEX);
            COUNT_TYPE(T_RATIONAL);
            COUNT_TYPE(T_NIL);
            COUNT_TYPE(T_TRUE);
            COUNT_TYPE(T_FALSE);
            COUNT_TYPE(T_SYMBOL);
            COUNT_TYPE(T_FIXNUM);
            COUNT_TYPE(T_UNDEF);
            COUNT_TYPE(T_NODE);
            COUNT_TYPE(T_ICLASS);
            COUNT_TYPE(T_ZOMBIE);
#undef COUNT_TYPE
          default: type = INT2NUM(i); break;
        }
        if (counts[i])
            rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
    }

    return hash;
}

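/*
 *  call-seq:
 *     GC.count -> Integer
 *
 *  Returns the number of times GC has run since the process started.
 */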
static VALUE
gc_count(VALUE self)
{
    return UINT2NUM((&rb_objspace)->count);
}

#if CALC_EXACT_MALLOC_SIZE
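/*
 *  call-seq:
 *     GC.malloc_allocated_size -> Integer
 *
 *  Returns the number of bytes currently accounted to the
 *  interpreter's malloc wrappers. Only defined when ruby is built
 *  with CALC_EXACT_MALLOC_SIZE.
 */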
static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM((&rb_objspace)->malloc_params.allocated_size);
}

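/*
 *  call-seq:
 *     GC.malloc_allocations -> Integer
 *
 *  Returns the number of allocations tracked by the interpreter's
 *  malloc accounting. Only defined when ruby is built with
 *  CALC_EXACT_MALLOC_SIZE.
 */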
static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM((&rb_objspace)->malloc_params.allocations);
}
#endif

static VALUE
gc_profile_record_get(void)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = (&rb_objspace);

    if (!objspace->profile.run) {
        return Qnil;
    }

    for (i = 0; i < objspace->profile.count; i++) {
        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), rb_uint2inum(objspace->profile.record[i].heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), rb_uint2inum(objspace->profile.record[i].heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), rb_uint2inum(objspace->profile.record[i].heap_total_objects));
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), rb_uint2inum(objspace->profile.record[i].allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), rb_uint2inum(objspace->profile.record[i].allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), rb_uint2inum(objspace->profile.record[i].heap_use_slots));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), rb_uint2inum(objspace->profile.record[i].heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), rb_uint2inum(objspace->profile.record[i].heap_free_objects));
        /* have_finalize is a plain C int; convert it to a Ruby boolean
         * rather than storing the raw integer as a VALUE */
        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize ? Qtrue : Qfalse);
#endif
        rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}

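/*
 *  call-seq:
 *     GC::Profiler.result -> String
 *
 *  Returns a report of the recorded GC runs, one row per run, in the
 *  format produced below (figures illustrative):
 *
 *     GC 4 invokes.
 *     Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)
 *         1               0.012               159240               212940                10647     1.53000000000000013323
 */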
static VALUE
gc_profile_result(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE record;
    VALUE result;
    int i;

    record = gc_profile_record_get();
    if (objspace->profile.run && objspace->profile.count) {
        result = rb_sprintf("GC %d invokes.\n", NUM2INT(gc_count(0)));
        rb_str_cat2(result, "Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n");
        for (i = 0; i < (int)RARRAY_LEN(record); i++) {
            VALUE r = RARRAY_PTR(record)[i];
            rb_str_catf(result, "%5d %19.3f %20d %20d %20d %30.20f\n",
                        i+1, NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_INVOKE_TIME")))),
                        NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SIZE")))),
                        NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")))),
                        NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")))),
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_TIME"))))*1000);
        }
#if GC_PROFILE_MORE_DETAIL
        rb_str_cat2(result, "\n\n");
        rb_str_cat2(result, "More detail.\n");
        rb_str_cat2(result, "Index Allocate Increase    Allocate Limit  Use Slot  Have Finalize             Mark Time(ms)            Sweep Time(ms)\n");
        for (i = 0; i < (int)RARRAY_LEN(record); i++) {
            VALUE r = RARRAY_PTR(record)[i];
            rb_str_catf(result, "%5d %17d %17d %9d %14s %25.20f %25.20f\n",
                        i+1, NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_INCREASE")))),
                        NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_LIMIT")))),
                        NUM2INT(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SLOTS")))),
                        rb_hash_aref(r, ID2SYM(rb_intern("HAVE_FINALIZE"))) ? "true" : "false",
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_MARK_TIME"))))*1000,
                        NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_SWEEP_TIME"))))*1000);
        }
#endif
    }
    else {
        result = rb_str_new2("");
    }
    return result;
}

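/*
 *  call-seq:
 *     GC::Profiler.report
 *     GC::Profiler.report(io)
 *
 *  Writes the GC::Profiler.result string to <i>io</i>, or to $stdout
 *  when no argument is given.
 */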
static VALUE
gc_profile_report(int argc, VALUE *argv, VALUE self)
{
    VALUE out;

    if (argc == 0) {
        out = rb_stdout;
    }
    else {
        rb_scan_args(argc, argv, "01", &out);
    }
    rb_io_write(out, gc_profile_result());

    return Qnil;
}

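/*
 *  call-seq:
 *     GC::Profiler.total_time -> float
 *
 *  Returns the total time spent in garbage collection, in seconds,
 *  summed over all recorded runs.
 */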
static VALUE
gc_profile_total_time(VALUE self)
{
    double time = 0;
    rb_objspace_t *objspace = &rb_objspace;
    size_t i;

    if (objspace->profile.run && objspace->profile.count) {
        for (i = 0; i < objspace->profile.count; i++) {
            time += objspace->profile.record[i].gc_time;
        }
    }
    return DBL2NUM(time);
}

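/*
 *  The GC module provides an interface to Ruby's mark-and-sweep
 *  garbage collection mechanism. Some of the underlying methods are
 *  also available via the ObjectSpace module, and GC::Profiler
 *  exposes simple profiling of collection runs.
 */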
void
Init_GC(void)
{
    VALUE rb_mObSpace;
    VALUE rb_mProfiler;

    rb_mGC = rb_define_module("GC");
    rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
    rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
    rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
    rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
    rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
    rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
    rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);

    rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
    rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
    rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
    rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
    rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
    rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
    rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
    rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);

    rb_mObSpace = rb_define_module("ObjectSpace");
    rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
    rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);

    rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);

    nomem_error = rb_exc_new3(rb_eNoMemError,
                              rb_obj_freeze(rb_str_new2("failed to allocate memory")));
    OBJ_TAINT(nomem_error);
    OBJ_FREEZE(nomem_error);

    rb_define_method(rb_mKernel, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);

#if CALC_EXACT_MALLOC_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif
}