Ruby 3.4.4p34 (2025-05-14 revision a38531fd3f617bf734ef7d6c595325f69985ea1d)
gc.c
/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000 Information-technology Promotion Agency, Japan

**********************************************************************/

#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
#ifdef _WIN32
# include "ruby/ruby.h"
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#else
# include <setjmp.h>
#endif
#include <stdarg.h>
#include <stdio.h>

/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

/* MALLOC_HEADERS_END */

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

/* For ruby_annotate_mmap */
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "constant.h"
#include "darray.h"
#include "debug_counter.h"
#include "eval_intern.h"
#include "gc/gc.h"
#include "id_table.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/complex.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "rjit.h"
#include "probes.h"
#include "regint.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/re.h"
#include "ruby/st.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/vm.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "symbol.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#include "yjit.h"

#include "builtin.h"
#include "shape.h"

unsigned int
rb_gc_vm_lock(void)
{
    unsigned int lev;
    RB_VM_LOCK_ENTER_LEV(&lev);
    return lev;
}

void
rb_gc_vm_unlock(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_LEV(&lev);
}

unsigned int
rb_gc_cr_lock(void)
{
    unsigned int lev;
    RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
    return lev;
}

void
rb_gc_cr_unlock(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
}

unsigned int
rb_gc_vm_lock_no_barrier(void)
{
    unsigned int lev = 0;
    RB_VM_LOCK_ENTER_LEV_NB(&lev);
    return lev;
}

void
rb_gc_vm_unlock_no_barrier(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_LEV(&lev);
}

void
rb_gc_vm_barrier(void)
{
    rb_vm_barrier();
}

#if USE_MODULAR_GC
void *
rb_gc_get_ractor_newobj_cache(void)
{
    return GET_RACTOR()->newobj_cache;
}

void
rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_initialize(&context->lock);
    context->ec = GET_EC();
}

void
rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_lock(&context->lock);

    GC_ASSERT(rb_current_execution_context(false) == NULL);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(context->ec);
#else
    native_tls_set(ruby_current_ec_key, context->ec);
#endif
}

void
rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_unlock(&context->lock);

    GC_ASSERT(rb_current_execution_context(true) == context->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(NULL);
#else
    native_tls_set(ruby_current_ec_key, NULL);
#endif
}
#endif

bool
rb_gc_event_hook_required_p(rb_event_flag_t event)
{
    return ruby_vm_event_flags & event;
}

void
rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
{
    if (LIKELY(!rb_gc_event_hook_required_p(event))) return;

    rb_execution_context_t *ec = GET_EC();
    if (!ec->cfp) return;

    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
}
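
/* Illustrative sketch (not part of the original source): the internal events
 * dispatched above are consumed from C extensions through the TracePoint C
 * API declared in ruby/debug.h. A minimal allocation counter; the static
 * counter is a stand-in defined by the extension:
 *
 *     static size_t newobj_count;
 *
 *     static void
 *     count_newobj_i(VALUE tpval, void *data)
 *     {
 *         newobj_count++;  // rb_tracearg_object() would yield the new object
 *     }
 *
 *     VALUE tp = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, count_newobj_i, NULL);
 *     rb_tracepoint_enable(tp);
 */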

void *
rb_gc_get_objspace(void)
{
    return GET_VM()->gc.objspace;
}


void
rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
{
    rb_ractor_t *r = NULL;
    ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
        func(r->newobj_cache, data);
    }
}

void
rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
{
    volatile struct {
        VALUE errinfo;
        VALUE final;
        rb_control_frame_t *cfp;
        VALUE *sp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->cfp->sp = saved.sp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.cfp = ec->cfp;
    saved.sp = ec->cfp->sp;
    saved.finished = 0;
    saved.final = Qundef;

    EC_PUSH_TAG(ec);
    enum ruby_tag_type state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished; /* skip failed finalizer */

        VALUE failed_final = saved.final;
        saved.final = Qundef;
        if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
            rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
            rb_ec_error_print(ec, ec->errinfo);
        }
    }

    for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
        saved.final = callback(i, data);
        rb_check_funcall(saved.final, idCall, 1, &objid);
    }
    EC_POP_TAG();
#undef RESTORE_FINALIZER
}
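
/* Behavioral sketch (illustrative, not from this file): a finalizer that
 * raises does not abort the remaining finalizers; the tag handler above
 * skips it, warning when $VERBOSE is not nil:
 *
 *     ObjectSpace.define_finalizer(obj, proc { raise "boom" })  # skipped, warned
 *     ObjectSpace.define_finalizer(obj, proc { puts "ran" })    # still runs
 */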

void
rb_gc_set_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
}

void
rb_gc_unset_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

bool
rb_gc_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

bool rb_obj_is_main_ractor(VALUE gv);

bool
rb_gc_shutdown_call_finalizer_p(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
        if (rb_obj_is_thread(obj)) return false;
        if (rb_obj_is_mutex(obj)) return false;
        if (rb_obj_is_fiber(obj)) return false;
        if (rb_obj_is_main_ractor(obj)) return false;

        return true;

      case T_FILE:
        return true;

      case T_SYMBOL:
        if (RSYMBOL(obj)->fstr &&
            (BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_NONE ||
             BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_ZOMBIE)) {
            RSYMBOL(obj)->fstr = 0;
        }
        return true;

      case T_NONE:
        return false;

      default:
        return ruby_free_at_exit_p();
    }
}
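
/* Illustrative note (assumption, not stated in this file): ruby_free_at_exit_p()
 * reflects the RUBY_FREE_AT_EXIT=1 environment flag, an experimental mode in
 * which the VM frees as many objects as possible at shutdown, e.g. for leak
 * checking:
 *
 *     $ RUBY_FREE_AT_EXIT=1 ruby -e 'puts :ok'
 */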

uint32_t
rb_gc_get_shape(VALUE obj)
{
    return (uint32_t)rb_shape_get_shape_id(obj);
}

void
rb_gc_set_shape(VALUE obj, uint32_t shape_id)
{
    rb_shape_set_shape_id(obj, (uint32_t)shape_id);
}

uint32_t
rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
{
    rb_shape_t *orig_shape = rb_shape_get_shape(obj);

    if (rb_shape_obj_too_complex(obj)) return (uint32_t)OBJ_TOO_COMPLEX_SHAPE_ID;

    rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)(heap_id + FIRST_T_OBJECT_SHAPE_ID));
    rb_shape_t *new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);

    if (!new_shape) return 0;

    return (uint32_t)rb_shape_id(new_shape);
}

void rb_vm_update_references(void *ptr);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define unless_objspace(objspace) \
    void *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
    else /* return; or objspace will be warned uninitialized */

#define RMOVED(obj) ((struct RMoved *)(obj))

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

#ifndef RGENGC_OBJ_INFO
# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
#endif

#ifndef CALC_EXACT_MALLOC_SIZE
# define CALC_EXACT_MALLOC_SIZE 0
#endif

VALUE rb_mGC;

static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
static size_t
gc_compute_malloc_offset(void)
{
    // Different allocators use different metadata storage strategies which result in different
    // ideal sizes.
    // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
    // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
    // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
    // waste memory.
    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
    // no wasted memory.
    size_t offset = 0;
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
        free(test_ptr);

        if (wasted == 0) {
            return offset;
        }
    }
    return 0;
}
#else
static size_t
gc_compute_malloc_offset(void)
{
    // If we don't have malloc_usable_size, we use powers of 2.
    return 0;
}
#endif
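
// Worked trace (illustrative, assuming glibc's 16-byte-aligned chunks with an
// 8-byte header): at offset 0 the probe is malloc(64), whose usable size is
// 72, wasting 8 bytes; at offset 8 the probe is malloc(56), whose usable size
// is exactly 56, so gc_compute_malloc_offset() returns 8.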

size_t
rb_malloc_grow_capa(size_t current, size_t type_size)
{
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    }
    current_capacity *= type_size;

    // We double the current capacity.
    size_t new_capacity = (current_capacity * 2);

    // And round up to the next power of 2 if it's not already one.
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
    }

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
    }
    RUBY_ASSERT(new_capacity > current);
    return new_capacity;
}
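
// Worked example (illustrative, assuming malloc_offset == 8 as computed on
// glibc): rb_malloc_grow_capa(10, 8) gives 10 * 8 = 80 bytes, doubled to 160,
// rounded up to the 256-byte power of two, minus the 8-byte offset = 248,
// divided by the element size = 31 elements; malloc(31 * 8) then fills a
// 256-byte chunk exactly.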

static inline struct rbimpl_size_mul_overflow_tag
size_add_overflow(size_t x, size_t y)
{
    size_t z;
    bool p;
#if 0

#elif defined(ckd_add)
    p = ckd_add(&z, x, y);

#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);

#elif defined(DSIZE_T)
    RB_GNUC_EXTENSION DSIZE_T dx = x;
    RB_GNUC_EXTENSION DSIZE_T dy = y;
    RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
    p = dz > SIZE_MAX;
    z = (size_t)dz;

#else
    z = x + y;
    p = z < y;

#endif
    return (struct rbimpl_size_mul_overflow_tag) { p, z, };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
}

PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIdSIZE
            " * %"PRIdSIZE
            " + %"PRIdSIZE
            " * %"PRIdSIZE
            " > %"PRIdSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
}
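
/* Illustrative use (assumed caller, not from this file): sizing a buffer of
 * n VALUEs plus a fixed, hypothetical header, raising ArgumentError instead
 * of silently wrapping on overflow:
 *
 *     size_t bytes = rb_size_mul_add_or_raise(
 *         n, sizeof(VALUE), sizeof(struct header), rb_eArgError);
 *     void *buf = ruby_xmalloc(bytes);
 */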

#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif
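
/* Illustrative usage (public API, not from this file): RB_GC_GUARD() expands
 * to a call through rb_gc_guarded_ptr_val() on supported compilers, keeping a
 * VALUE reachable across calls that only use its interior pointer:
 *
 *     VALUE str = rb_str_new_cstr("payload");
 *     const char *p = RSTRING_PTR(str);
 *     use(p);            // hypothetical consumer; str itself looks dead here
 *     RB_GC_GUARD(str);  // so pin it on the stack until this point
 */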

static const char *obj_type_name(VALUE obj);
#include "gc/default/default.c"

#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
# error "Modular GC requires dlopen"
#elif USE_MODULAR_GC
#include <dlfcn.h>

typedef struct gc_function_map {
    // Bootup
    void *(*objspace_alloc)(void);
    void (*objspace_init)(void *objspace_ptr);
    void (*objspace_free)(void *objspace_ptr);
    void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
    void (*ractor_cache_free)(void *objspace_ptr, void *cache);
    void (*set_params)(void *objspace_ptr);
    void (*init)(void);
    size_t *(*heap_sizes)(void *objspace_ptr);
    // Shutdown
    void (*shutdown_free_objects)(void *objspace_ptr);
    // GC
    void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
    bool (*during_gc_p)(void *objspace_ptr);
    void (*prepare_heap)(void *objspace_ptr);
    void (*gc_enable)(void *objspace_ptr);
    void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
    bool (*gc_enabled_p)(void *objspace_ptr);
    VALUE (*config_get)(void *objspace_ptr);
    void (*config_set)(void *objspace_ptr, VALUE hash);
    void (*stress_set)(void *objspace_ptr, VALUE flag);
    VALUE (*stress_get)(void *objspace_ptr);
    // Object allocation
    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size);
    size_t (*obj_slot_size)(VALUE obj);
    size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
    bool (*size_allocatable_p)(size_t size);
    // Malloc
    void *(*malloc)(void *objspace_ptr, size_t size);
    void *(*calloc)(void *objspace_ptr, size_t size);
    void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
    void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
    void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
    // Marking
    void (*mark)(void *objspace_ptr, VALUE obj);
    void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
    void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
    void (*mark_maybe)(void *objspace_ptr, VALUE obj);
    void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
    void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
    // Compaction
    bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
    VALUE (*location)(void *objspace_ptr, VALUE value);
    // Write barriers
    void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
    void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
    void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
    // Heap walking
    void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
    void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
    // Finalizers
    void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
    VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
    void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
    void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
    void (*shutdown_call_finalizer)(void *objspace_ptr);
    // Object ID
    VALUE (*object_id)(void *objspace_ptr, VALUE obj);
    VALUE (*object_id_to_ref)(void *objspace_ptr, VALUE object_id);
    // Forking
    void (*before_fork)(void *objspace_ptr);
    void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
    // Statistics
    void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
    bool (*get_measure_total_time)(void *objspace_ptr);
    unsigned long long (*get_total_time)(void *objspace_ptr);
    size_t (*gc_count)(void *objspace_ptr);
    VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
    VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
    VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
    const char *(*active_gc_name)(void);
    // Miscellaneous
    size_t (*obj_flags)(void *objspace_ptr, VALUE obj, ID* flags, size_t max);
    bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
    bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
    void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
    void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);

    bool modular_gc_loaded_p;
} rb_gc_function_map_t;

static rb_gc_function_map_t rb_gc_functions;

# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)

static void
ruby_modular_gc_init(void)
{
    // Assert that the directory path ends with a /
    RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');

    const char *gc_so_file = getenv(RUBY_GC_LIBRARY);

    rb_gc_function_map_t gc_functions = { 0 };

    char *gc_so_path = NULL;
    void *handle = NULL;
    if (gc_so_file) {
        /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
         * not load a shared object outside of the directory. */
        for (size_t i = 0; i < strlen(gc_so_file); i++) {
            char c = gc_so_file[i];
            if (isalnum(c)) continue;
            switch (c) {
              case '-':
              case '_':
                break;
              default:
                fprintf(stderr, "Only alphanumeric, dash, and underscore are allowed in "RUBY_GC_LIBRARY"\n");
                exit(1);
            }
        }

        size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
#ifdef LOAD_RELATIVE
        Dl_info dli;
        size_t prefix_len = 0;
        if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
            const char *base = strrchr(dli.dli_fname, '/');
            if (base) {
                size_t tail = 0;
# define end_with_p(lit) \
    (prefix_len >= (tail = rb_strlen_lit(lit)) && \
     memcmp(base - tail, lit, tail) == 0)

                prefix_len = base - dli.dli_fname;
                if (end_with_p("/bin") || end_with_p("/lib")) {
                    prefix_len -= tail;
                }
                prefix_len += MODULAR_GC_DIR[0] != '/';
                gc_so_path_size += prefix_len;
            }
        }
#endif
        gc_so_path = alloca(gc_so_path_size);
        {
            size_t gc_so_path_idx = 0;
#define GC_SO_PATH_APPEND(str) do { \
    gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
} while (0)
#ifdef LOAD_RELATIVE
            if (prefix_len > 0) {
                memcpy(gc_so_path, dli.dli_fname, prefix_len);
                gc_so_path_idx = prefix_len;
            }
#endif
            GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
            GC_SO_PATH_APPEND(gc_so_file);
            GC_SO_PATH_APPEND(DLEXT);
            GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
#undef GC_SO_PATH_APPEND
        }

        handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
        if (!handle) {
            fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
            exit(1);
        }

        gc_functions.modular_gc_loaded_p = true;
    }

# define load_modular_gc_func(name) do { \
    if (handle) { \
        const char *func_name = "rb_gc_impl_" #name; \
        gc_functions.name = dlsym(handle, func_name); \
        if (!gc_functions.name) { \
            fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
            exit(1); \
        } \
    } \
    else { \
        gc_functions.name = rb_gc_impl_##name; \
    } \
} while (0)

    // Bootup
    load_modular_gc_func(objspace_alloc);
    load_modular_gc_func(objspace_init);
    load_modular_gc_func(objspace_free);
    load_modular_gc_func(ractor_cache_alloc);
    load_modular_gc_func(ractor_cache_free);
    load_modular_gc_func(set_params);
    load_modular_gc_func(init);
    load_modular_gc_func(heap_sizes);
    // Shutdown
    load_modular_gc_func(shutdown_free_objects);
    // GC
    load_modular_gc_func(start);
    load_modular_gc_func(during_gc_p);
    load_modular_gc_func(prepare_heap);
    load_modular_gc_func(gc_enable);
    load_modular_gc_func(gc_disable);
    load_modular_gc_func(gc_enabled_p);
    load_modular_gc_func(config_set);
    load_modular_gc_func(config_get);
    load_modular_gc_func(stress_set);
    load_modular_gc_func(stress_get);
    // Object allocation
    load_modular_gc_func(new_obj);
    load_modular_gc_func(obj_slot_size);
    load_modular_gc_func(heap_id_for_size);
    load_modular_gc_func(size_allocatable_p);
    // Malloc
    load_modular_gc_func(malloc);
    load_modular_gc_func(calloc);
    load_modular_gc_func(realloc);
    load_modular_gc_func(free);
    load_modular_gc_func(adjust_memory_usage);
    // Marking
    load_modular_gc_func(mark);
    load_modular_gc_func(mark_and_move);
    load_modular_gc_func(mark_and_pin);
    load_modular_gc_func(mark_maybe);
    load_modular_gc_func(mark_weak);
    load_modular_gc_func(remove_weak);
    // Compaction
    load_modular_gc_func(object_moved_p);
    load_modular_gc_func(location);
    // Write barriers
    load_modular_gc_func(writebarrier);
    load_modular_gc_func(writebarrier_unprotect);
    load_modular_gc_func(writebarrier_remember);
    // Heap walking
    load_modular_gc_func(each_objects);
    load_modular_gc_func(each_object);
    // Finalizers
    load_modular_gc_func(make_zombie);
    load_modular_gc_func(define_finalizer);
    load_modular_gc_func(undefine_finalizer);
    load_modular_gc_func(copy_finalizer);
    load_modular_gc_func(shutdown_call_finalizer);
    // Object ID
    load_modular_gc_func(object_id);
    load_modular_gc_func(object_id_to_ref);
    // Forking
    load_modular_gc_func(before_fork);
    load_modular_gc_func(after_fork);
    // Statistics
    load_modular_gc_func(set_measure_total_time);
    load_modular_gc_func(get_measure_total_time);
    load_modular_gc_func(get_total_time);
    load_modular_gc_func(gc_count);
    load_modular_gc_func(latest_gc_info);
    load_modular_gc_func(stat);
    load_modular_gc_func(stat_heap);
    load_modular_gc_func(active_gc_name);
    // Miscellaneous
    load_modular_gc_func(obj_flags);
    load_modular_gc_func(pointer_to_heap_p);
    load_modular_gc_func(garbage_object_p);
    load_modular_gc_func(set_event_hook);
    load_modular_gc_func(copy_attributes);

# undef load_modular_gc_func

    rb_gc_functions = gc_functions;
}
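
/* Illustrative usage (assumed, not from this file): with RUBY_GC_LIBRARY=mygc
 * (a hypothetical name), the loader above resolves something like
 * <prefix>/modular_gc/librubygc.mygc.<DLEXT>, dlopen()s it, and every
 * rb_gc_impl_* call below dispatches through the loaded function table:
 *
 *     $ RUBY_GC_LIBRARY=mygc ruby -e 'puts GC.stat[:count]'
 */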

// Bootup
# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
# define rb_gc_impl_set_params rb_gc_functions.set_params
# define rb_gc_impl_init rb_gc_functions.init
# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
// Shutdown
# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
// GC
# define rb_gc_impl_start rb_gc_functions.start
# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
# define rb_gc_impl_config_get rb_gc_functions.config_get
# define rb_gc_impl_config_set rb_gc_functions.config_set
# define rb_gc_impl_stress_set rb_gc_functions.stress_set
# define rb_gc_impl_stress_get rb_gc_functions.stress_get
// Object allocation
# define rb_gc_impl_new_obj rb_gc_functions.new_obj
# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
// Malloc
# define rb_gc_impl_malloc rb_gc_functions.malloc
# define rb_gc_impl_calloc rb_gc_functions.calloc
# define rb_gc_impl_realloc rb_gc_functions.realloc
# define rb_gc_impl_free rb_gc_functions.free
# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
// Marking
# define rb_gc_impl_mark rb_gc_functions.mark
# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
// Compaction
# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
# define rb_gc_impl_location rb_gc_functions.location
// Write barriers
# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
// Heap walking
# define rb_gc_impl_each_objects rb_gc_functions.each_objects
# define rb_gc_impl_each_object rb_gc_functions.each_object
// Finalizers
# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
// Object ID
# define rb_gc_impl_object_id rb_gc_functions.object_id
# define rb_gc_impl_object_id_to_ref rb_gc_functions.object_id_to_ref
// Forking
# define rb_gc_impl_before_fork rb_gc_functions.before_fork
# define rb_gc_impl_after_fork rb_gc_functions.after_fork
// Statistics
# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
# define rb_gc_impl_gc_count rb_gc_functions.gc_count
# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
# define rb_gc_impl_stat rb_gc_functions.stat
# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
// Miscellaneous
# define rb_gc_impl_obj_flags rb_gc_functions.obj_flags
# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
#endif

#ifdef RUBY_ASAN_ENABLED
static void
asan_death_callback(void)
{
    if (GET_VM()) {
        rb_bug_without_die("ASAN error");
    }
}
#endif

static VALUE initial_stress = Qfalse;

void *
rb_objspace_alloc(void)
{
#if USE_MODULAR_GC
    ruby_modular_gc_init();
#endif

    void *objspace = rb_gc_impl_objspace_alloc();
    ruby_current_vm_ptr->gc.objspace = objspace;

    rb_gc_impl_objspace_init(objspace);
    rb_gc_impl_stress_set(objspace, initial_stress);

#ifdef RUBY_ASAN_ENABLED
    __sanitizer_set_death_callback(asan_death_callback);
#endif

    return objspace;
}

void
rb_objspace_free(void *objspace)
{
    rb_gc_impl_objspace_free(objspace);
}

size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return rb_gc_impl_obj_slot_size(obj);
}

static inline void
gc_validate_pc(void) {
#if RUBY_DEBUG
    rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = ec->cfp;
    if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
        RUBY_ASSERT(cfp->pc >= ISEQ_BODY(cfp->iseq)->iseq_encoded);
        RUBY_ASSERT(cfp->pc <= ISEQ_BODY(cfp->iseq)->iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size);
    }
#endif
}

static inline VALUE
newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t size)
{
    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, v1, v2, v3, wb_protected, size);

    gc_validate_pc();

    if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
        unsigned int lev;
        RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
        {
            memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);

            /* We must disable GC here because the callback could call xmalloc
             * which could potentially trigger a GC, and a lot of code is unsafe
             * to trigger a GC right after an object has been allocated because
             * they perform initialization for the object and assume that the
             * GC does not trigger before then. */
            bool gc_disabled = RTEST(rb_gc_disable_no_rest());
            {
                rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
            }
            if (!gc_disabled) rb_gc_enable();
        }
        RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
    }

    return obj;
}

VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
    if (klass) rb_data_object_check(klass);
    return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, !dmark, sizeof(struct RTypedData));
}

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

static VALUE
typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
    return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
        rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
    }

    return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    if (type->flags & RUBY_TYPED_EMBEDDABLE) {
        if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
            rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
        }

        size_t embed_size = offsetof(struct RTypedData, data) + size;
        if (rb_gc_size_allocatable_p(embed_size)) {
            VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
            memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
            return obj;
        }
    }

    VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

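/* Usage sketch (illustrative, public TypedData API; the struct and type are
 * hypothetical): a C extension wrapping a struct reaches typed_data_alloc()
 * above via TypedData_Make_Struct:
 *
 *     struct timer { uint64_t ns; };
 *
 *     static const rb_data_type_t timer_type = {
 *         "timer",
 *         { NULL, RUBY_TYPED_DEFAULT_FREE, NULL, },
 *         0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 *     };
 *
 *     struct timer *t;
 *     VALUE obj = TypedData_Make_Struct(klass, struct timer, &timer_type, t);
 */
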
static size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    size_t size = 0;
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_GET_DATA(obj);

        if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
#ifdef HAVE_MALLOC_USABLE_SIZE
            size += malloc_usable_size((void *)ptr);
#endif
        }

        if (ptr && type->function.dsize) {
            size += type->function.dsize(ptr);
        }
    }

    return size;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

static enum rb_id_table_iterator_result
cvar_table_free_i(VALUE value, void *ctx)
{
    xfree((void *)value);
    return ID_TABLE_CONTINUE;
}

static inline void
make_io_zombie(void *objspace, VALUE obj)
{
    rb_io_t *fptr = RFILE(obj)->fptr;
    rb_gc_impl_make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
}

static bool
rb_data_free(void *objspace, VALUE obj)
{
    void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
    if (data) {
        int free_immediately = false;
        void (*dfree)(void *);

        if (RTYPEDDATA_P(obj)) {
            free_immediately = (RTYPEDDATA(obj)->type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
            dfree = RTYPEDDATA(obj)->type->function.dfree;
        }
        else {
            dfree = RDATA(obj)->dfree;
        }

        if (dfree) {
            if (dfree == RUBY_DEFAULT_FREE) {
                if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
            }
            else if (free_immediately) {
                (*dfree)(data);
                if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                }

                RB_DEBUG_COUNTER_INC(obj_data_imm_free);
            }
            else {
                rb_gc_impl_make_zombie(rb_gc_get_objspace(), obj, dfree, data);
                RB_DEBUG_COUNTER_INC(obj_data_zombie);
                return FALSE;
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_data_empty);
        }
    }

    return true;
}

void
rb_gc_obj_free_vm_weak_references(VALUE obj)
{
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar((VALUE)obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_STRING:
        if (FL_TEST(obj, RSTRING_FSTR)) {
            st_data_t fstr = (st_data_t)obj;
            st_delete(rb_vm_fstring_table(), &fstr, NULL);
            RB_DEBUG_COUNTER_INC(obj_str_fstr);

            FL_UNSET(obj, RSTRING_FSTR);
        }
        break;
      case T_SYMBOL:
        rb_gc_free_dsymbol(obj);
        break;
      case T_IMEMO:
        switch (imemo_type(obj)) {
          case imemo_callinfo:
            rb_vm_ci_free((const struct rb_callinfo *)obj);
            break;
          case imemo_ment:
            rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
            break;
          default:
            break;
        }
        break;
      default:
        break;
    }
}

bool
rb_gc_obj_free(void *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_free);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (rb_shape_obj_too_complex(obj)) {
            RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
            st_free_table(ROBJECT_IV_HASH(obj));
        }
        else if (RBASIC(obj)->flags & ROBJECT_EMBED) {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        else {
            xfree(ROBJECT(obj)->as.heap.ivptr);
            RB_DEBUG_COUNTER_INC(obj_obj_ptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_id_table_free(RCLASS_M_TBL(obj));
        rb_cc_table_free(obj);
        if (rb_shape_obj_too_complex(obj)) {
            st_free_table((st_table *)RCLASS_IVPTR(obj));
        }
        else {
            xfree(RCLASS_IVPTR(obj));
        }

        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_CVC_TBL(obj)) {
            rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
            rb_id_table_free(RCLASS_CVC_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);
        if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
            xfree(RCLASS_SUPERCLASSES(obj));
        }

        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif

        rb_hash_free(obj);
        break;
      case T_REGEXP:
        if (RREGEXP(obj)->ptr) {
            onig_free(RREGEXP(obj)->ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (!rb_data_free(objspace, obj)) return false;
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            xfree(rm->char_offset);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        /* Basically, T_ICLASS shares the method table with the module */
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            /* Method table is not shared for origin iclasses of classes */
            rb_id_table_free(RCLASS_M_TBL(obj));
        }
        if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
            rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_cc_table_free(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RSTRUCT(obj)->as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else {
            xfree((void *)RSTRUCT(obj)->as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        rb_imemo_free((VALUE)obj);
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST(obj, FL_FINALIZE)) {
        rb_gc_impl_make_zombie(rb_gc_get_objspace(), obj, 0, 0);
        return FALSE;
    }
    else {
        return TRUE;
    }
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
}

static int
internal_object_p(VALUE obj)
{
    void *ptr = asan_unpoison_object_temporary(obj);

    if (RBASIC(obj)->flags) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (!RBASIC(obj)->klass) break;
            if (RCLASS_SINGLETON_P(obj)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!RBASIC(obj)->klass) break;
            return 0;
        }
    }
    if (ptr || !RBASIC(obj)->flags) {
        rb_asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *      a = 102.7
 *      b = 95       # Won't be returned
 *      c = 12345678987654321
 *      count = ObjectSpace.each_object(Numeric) {|x| p x }
 *      puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *      12345678987654321
 *      102.7
 *      2.71828182845905
 *      3.14159265358979
 *      2.22044604925031e-16
 *      1.7976931348623157e+308
 *      2.2250738585072e-308
 *      Total count: 7
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_check_frozen(obj);

    rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);

    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  is destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finalizer proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *      class Foo
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *        end
 *
 *        def self.create_finalizer(data_needed_for_finalization)
 *          proc {
 *            puts "finalizing #{data_needed_for_finalization}"
 *          }
 *        end
 *      end
 *
 *      class Bar
 *        class Remover
 *          def initialize(data_needed_for_finalization)
 *            @data_needed_for_finalization = data_needed_for_finalization
 *          end
 *
 *          def call(id)
 *            puts "finalizing #{@data_needed_for_finalization}"
 *          end
 *        end
 *
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *        end
 *      end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *      class CapturesSelf
 *        def initialize(name)
 *          ObjectSpace.define_finalizer(self, proc {
 *            # this finalizer will only be run on exit
 *            puts "finalizing #{name}"
 *          })
 *        end
 *      end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
        block = rb_block_proc();
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return rb_define_finalizer(obj, block);
}

VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);

    block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);

    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}

void
rb_objspace_call_finalizer(void)
{
    rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
}

void
rb_objspace_free_objects(void *objspace)
{
    rb_gc_impl_shutdown_free_objects(objspace);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    return rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
}

bool
rb_gc_pointer_to_heap_p(VALUE obj)
{
    return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
}

/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
 *     r == s                                 #=> true
 *
 *  On multi-ractor mode, if the object is not shareable, it raises
 *  RangeError.
 */

static VALUE
id2ref(VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    objid = rb_to_int(objid);
    if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
        VALUE ptr = NUM2PTR(objid);
        if (SPECIAL_CONST_P(ptr)) {
            if (ptr == Qtrue) return Qtrue;
            if (ptr == Qfalse) return Qfalse;
            if (NIL_P(ptr)) return Qnil;
            if (FIXNUM_P(ptr)) return ptr;
            if (FLONUM_P(ptr)) return ptr;

            if (SYMBOL_P(ptr)) {
                // Check that the symbol is valid
                if (rb_static_id_valid_p(SYM2ID(ptr))) {
                    return ptr;
                }
                else {
                    rb_raise(rb_eRangeError, "%p is not symbol id value", (void *)ptr);
                }
            }

            rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
        }
    }

    VALUE obj = rb_gc_impl_object_id_to_ref(rb_gc_get_objspace(), objid);
    if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
        return obj;
    }
    else {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
    }
}

/* :nodoc: */
static VALUE
os_id2ref(VALUE os, VALUE objid)
{
    return id2ref(objid);
}

static VALUE
rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(void *, VALUE))
{
    if (SPECIAL_CONST_P(obj)) {
#if SIZEOF_LONG == SIZEOF_VOIDP
        return LONG2NUM((SIGNED_VALUE)obj);
#else
        return LL2NUM((SIGNED_VALUE)obj);
#endif
    }

    return get_heap_object_id(objspace, obj);
}

static VALUE
nonspecial_obj_id(void *_objspace, VALUE obj)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
    return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
    return LL2NUM((SIGNED_VALUE)(obj) / 2);
#else
# error not supported
#endif
}

VALUE
rb_memory_id(VALUE obj)
{
    return rb_find_object_id(NULL, obj, nonspecial_obj_id);
}

/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       -> integer
 *     obj.object_id    -> integer
 *
 *  Returns an integer identifier for +obj+.
 *
 *  The same number will be returned on all calls to +object_id+ for a given
 *  object, and no two active objects will share an id.
 *
 *  Note that some objects of builtin classes are reused for optimization.
 *  This is the case for immediate values and frozen string literals.
 *
 *  BasicObject implements +__id__+, Kernel implements +object_id+.
 *
 *  Immediate values are not passed by reference but are passed by value:
 *  +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
 *
 *      Object.new.object_id  == Object.new.object_id  # => false
 *      (21 * 2).object_id    == (21 * 2).object_id    # => true
 *      "hello".object_id     == "hello".object_id     # => false
 *      "hi".freeze.object_id == "hi".freeze.object_id # => true
 */

VALUE
rb_obj_id(VALUE obj)
{
    /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
     * Otherwise, the object ID is a Numeric that is a non-zero multiple of
     * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
     * any immediates. */
    return rb_find_object_id(rb_gc_get_objspace(), obj, rb_gc_impl_object_id);
}

static enum rb_id_table_iterator_result
cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
{
    size_t *total_size = data_ptr;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    *total_size += sizeof(*ccs);
    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
    return ID_TABLE_CONTINUE;
}

static size_t
cc_table_memsize(struct rb_id_table *cc_table)
{
    size_t total = rb_id_table_memsize(cc_table);
    rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
    return total;
}

1926size_t
1927rb_obj_memsize_of(VALUE obj)
1928{
1929 size_t size = 0;
1930
1931 if (SPECIAL_CONST_P(obj)) {
1932 return 0;
1933 }
1934
1935 if (FL_TEST(obj, FL_EXIVAR)) {
1936 size += rb_generic_ivar_memsize(obj);
1937 }
1938
1939 switch (BUILTIN_TYPE(obj)) {
1940 case T_OBJECT:
1941 if (rb_shape_obj_too_complex(obj)) {
1942 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
1943 }
1944 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
1945 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
1946 }
1947 break;
1948 case T_MODULE:
1949 case T_CLASS:
1950 if (RCLASS_M_TBL(obj)) {
1951 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
1952 }
1953 // class IV sizes are allocated as powers of two
1954 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
1955 if (RCLASS_CVC_TBL(obj)) {
1956 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
1957 }
1958 if (RCLASS_EXT(obj)->const_tbl) {
1959 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
1960 }
1961 if (RCLASS_CC_TBL(obj)) {
1962 size += cc_table_memsize(RCLASS_CC_TBL(obj));
1963 }
1964 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
1965 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
1966 }
1967 break;
1968 case T_ICLASS:
1969 if (RICLASS_OWNS_M_TBL_P(obj)) {
1970 if (RCLASS_M_TBL(obj)) {
1971 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
1972 }
1973 }
1974 if (RCLASS_CC_TBL(obj)) {
1975 size += cc_table_memsize(RCLASS_CC_TBL(obj));
1976 }
1977 break;
1978 case T_STRING:
1979 size += rb_str_memsize(obj);
1980 break;
1981 case T_ARRAY:
1982 size += rb_ary_memsize(obj);
1983 break;
1984 case T_HASH:
1985 if (RHASH_ST_TABLE_P(obj)) {
1986 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
1987 /* st_table is in the slot */
1988 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
1989 }
1990 break;
1991 case T_REGEXP:
1992 if (RREGEXP_PTR(obj)) {
1993 size += onig_memsize(RREGEXP_PTR(obj));
1994 }
1995 break;
1996 case T_DATA:
1997 size += rb_objspace_data_type_memsize(obj);
1998 break;
1999 case T_MATCH:
2000 {
2001 rb_matchext_t *rm = RMATCH_EXT(obj);
2002 size += onig_region_memsize(&rm->regs);
2003 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2004 }
2005 break;
2006 case T_FILE:
2007 if (RFILE(obj)->fptr) {
2008 size += rb_io_memsize(RFILE(obj)->fptr);
2009 }
2010 break;
2011 case T_RATIONAL:
2012 case T_COMPLEX:
2013 break;
2014 case T_IMEMO:
2015 size += rb_imemo_memsize(obj);
2016 break;
2017
2018 case T_FLOAT:
2019 case T_SYMBOL:
2020 break;
2021
2022 case T_BIGNUM:
2023 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2024 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2025 }
2026 break;
2027
2028 case T_NODE:
2029 UNEXPECTED_NODE(obj_memsize_of);
2030 break;
2031
2032 case T_STRUCT:
2033 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2034 RSTRUCT(obj)->as.heap.ptr) {
2035 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2036 }
2037 break;
2038
2039 case T_ZOMBIE:
2040 case T_MOVED:
2041 break;
2042
2043 default:
2044 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2045 BUILTIN_TYPE(obj), (void*)obj);
2046 }
2047
2048 return size + rb_gc_obj_slot_size(obj);
2049}
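
/*
 * Editorial note: rb_obj_memsize_of() backs ObjectSpace.memsize_of in the
 * objspace extension. Illustrative Ruby usage (assuming default CRuby):
 *
 *     require "objspace"
 *     ObjectSpace.memsize_of("x" * 1024)  # slot size + malloc'ed buffer
 *
 * The value is an estimate: only memory this function knows how to
 * attribute to the object is counted.
 */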
2050
2051static int
2052set_zero(st_data_t key, st_data_t val, st_data_t arg)
2053{
2054 VALUE k = (VALUE)key;
2055 VALUE hash = (VALUE)arg;
2056 rb_hash_aset(hash, k, INT2FIX(0));
2057 return ST_CONTINUE;
2058}
2059
2060struct count_objects_data {
2061 size_t counts[T_MASK+1];
2062 size_t freed;
2063 size_t total;
2064};
2065
2066static void
2067count_objects_i(VALUE obj, void *d)
2068{
2069 struct count_objects_data *data = (struct count_objects_data *)d;
2070
2071 if (RBASIC(obj)->flags) {
2072 data->counts[BUILTIN_TYPE(obj)]++;
2073 }
2074 else {
2075 data->freed++;
2076 }
2077
2078 data->total++;
2079}
2080
2081/*
2082 * call-seq:
2083 * ObjectSpace.count_objects([result_hash]) -> hash
2084 *
2085 * Counts all objects grouped by type.
2086 *
2087 * It returns a hash, such as:
2088 * {
2089 * :TOTAL=>10000,
2090 * :FREE=>3011,
2091 * :T_OBJECT=>6,
2092 * :T_CLASS=>404,
2093 * # ...
2094 * }
2095 *
2096 * The contents of the returned hash are implementation specific.
2097 * They may change in the future.
2098 *
2099 * The keys starting with +:T_+ count live objects of that type.
2100 * For example, +:T_ARRAY+ is the number of arrays.
2101 * +:FREE+ is the number of object slots that are not currently in use.
2102 * +:TOTAL+ is the sum of the above.
2103 *
2104 * If the optional argument +result_hash+ is given,
2105 * it is overwritten and returned. This is intended to avoid probe effect.
2106 *
2107 * h = {}
2108 * ObjectSpace.count_objects(h)
2109 * puts h
2110 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2111 *
2112 * This method is only expected to work on C Ruby.
2113 *
2114 */
2115
2116static VALUE
2117count_objects(int argc, VALUE *argv, VALUE os)
2118{
2119 struct count_objects_data data = { 0 };
2120 VALUE hash = Qnil;
2121
2122 if (rb_check_arity(argc, 0, 1) == 1) {
2123 hash = argv[0];
2124 if (!RB_TYPE_P(hash, T_HASH))
2125 rb_raise(rb_eTypeError, "non-hash given");
2126 }
2127
2128 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2129
2130 if (NIL_P(hash)) {
2131 hash = rb_hash_new();
2132 }
2133 else if (!RHASH_EMPTY_P(hash)) {
2134 rb_hash_stlike_foreach(hash, set_zero, hash);
2135 }
2136 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(data.total));
2137 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(data.freed));
2138
2139 for (size_t i = 0; i <= T_MASK; i++) {
2140 VALUE type = type_sym(i);
2141 if (data.counts[i])
2142 rb_hash_aset(hash, type, SIZET2NUM(data.counts[i]));
2143 }
2144
2145 return hash;
2146}
2147
2148#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2149
2150#define STACK_START (ec->machine.stack_start)
2151#define STACK_END (ec->machine.stack_end)
2152#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2153
2154#if STACK_GROW_DIRECTION < 0
2155# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2156#elif STACK_GROW_DIRECTION > 0
2157# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2158#else
2159# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2160 : (size_t)(STACK_END - STACK_START + 1))
2161#endif
2162#if !STACK_GROW_DIRECTION
2163int ruby_stack_grow_direction;
2164int
2165ruby_get_stack_grow_direction(volatile VALUE *addr)
2166{
2167 VALUE *end;
2168 SET_MACHINE_STACK_END(&end);
2169
2170 if (end > addr) return ruby_stack_grow_direction = 1;
2171 return ruby_stack_grow_direction = -1;
2172}
2173#endif
2174
2175size_t
2176ruby_stack_length(VALUE **p)
2177{
2178 rb_execution_context_t *ec = GET_EC();
2179 SET_STACK_END;
2180 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2181 return STACK_LENGTH;
2182}
2183
2184#define PREVENT_STACK_OVERFLOW 1
2185#ifndef PREVENT_STACK_OVERFLOW
2186#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2187# define PREVENT_STACK_OVERFLOW 1
2188#else
2189# define PREVENT_STACK_OVERFLOW 0
2190#endif
2191#endif
2192#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2193static int
2194stack_check(rb_execution_context_t *ec, int water_mark)
2195{
2196 SET_STACK_END;
2197
2198 size_t length = STACK_LENGTH;
2199 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2200
2201 return length > maximum_length;
2202}
2203#else
2204#define stack_check(ec, water_mark) FALSE
2205#endif
2206
2207#define STACKFRAME_FOR_CALL_CFUNC 2048
2208
2209int
2210rb_ec_stack_check(rb_execution_context_t *ec)
2211{
2212 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2213}
2214
2215int
2216ruby_stack_check(void)
2217{
2218 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2219}
2220
2221/* ==================== Marking ==================== */
2222
2223#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2224 if (!RB_SPECIAL_CONST_P(obj)) { \
2225 rb_vm_t *vm = GET_VM(); \
2226 void *objspace = vm->gc.objspace; \
2227 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2228 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2229 (func)(objspace, (obj_or_ptr)); \
2230 } \
2231 else if (check_obj ? \
2232 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2233 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2234 true) { \
2235 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2236 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2237 vm->gc.mark_func_data = NULL; \
2238 mark_func_data->mark_func((obj), mark_func_data->data); \
2239 vm->gc.mark_func_data = mark_func_data; \
2240 } \
2241 } \
2242} while (0)
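
/*
 * Editorial note: the macro above serves two modes. During GC,
 * vm->gc.mark_func_data is NULL and the real marking function runs.
 * Outside GC, rb_objspace_reachable_objects_from() (later in this file)
 * temporarily installs mark_func_data, so the same children-walking code
 * reports each edge to a user callback instead of marking. The check_obj
 * flag guards conservative callers such as rb_gc_mark_maybe() against
 * non-heap pointers in that traversal mode.
 */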
2243
2244static inline void
2245gc_mark_internal(VALUE obj)
2246{
2247 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2248}
2249
2250void
2251rb_gc_mark_movable(VALUE obj)
2252{
2253 gc_mark_internal(obj);
2254}
2255
2256void
2257rb_gc_mark_and_move(VALUE *ptr)
2258{
2259 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2260}
2261
2262static inline void
2263gc_mark_and_pin_internal(VALUE obj)
2264{
2265 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2266}
2267
2268void
2269rb_gc_mark(VALUE obj)
2270{
2271 gc_mark_and_pin_internal(obj);
2272}
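
/*
 * Editorial sketch (illustrative, not part of gc.c): a typical extension
 * mark pair. rb_gc_mark() pins its referent; rb_gc_mark_movable() allows
 * compaction, provided a dcompact callback re-fetches the new address
 * with rb_gc_location(). struct point and both callbacks are hypothetical.
 */
struct point {
    VALUE label;
};

static void
point_mark(void *ptr)
{
    struct point *p = ptr;
    rb_gc_mark_movable(p->label); /* may move; fixed up in point_compact() */
}

static void
point_compact(void *ptr)
{
    struct point *p = ptr;
    p->label = rb_gc_location(p->label); /* fetch the post-compaction address */
}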
2273
2274static inline void
2275gc_mark_maybe_internal(VALUE obj)
2276{
2277 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2278}
2279
2280void
2281rb_gc_mark_maybe(VALUE obj)
2282{
2283 gc_mark_maybe_internal(obj);
2284}
2285
2286void
2287rb_gc_mark_weak(VALUE *ptr)
2288{
2289 if (RB_SPECIAL_CONST_P(*ptr)) return;
2290
2291 rb_vm_t *vm = GET_VM();
2292 void *objspace = vm->gc.objspace;
2293 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2294 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2295
2296 rb_gc_impl_mark_weak(objspace, ptr);
2297 }
2298 else {
2299 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2300 }
2301}
2302
2303void
2304rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2305{
2306 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2307}
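
/*
 * Editorial sketch (illustrative, not part of gc.c): a weak reference
 * holder. Marking weakly does not keep the referent alive; the GC may
 * clear the slot when the referent is collected, and rb_gc_remove_weak()
 * must be called if the holder releases the slot while a GC might still
 * hold the pointer. struct holder is hypothetical.
 */
struct holder {
    VALUE target;
};

static void
holder_mark(void *ptr)
{
    struct holder *h = ptr;
    rb_gc_mark_weak(&h->target); /* weak: target may still be reclaimed */
}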
2308
2309ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2310static void
2311each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2312{
2313 VALUE v;
2314 while (n--) {
2315 v = *x;
2316 cb(v, data);
2317 x++;
2318 }
2319}
2320
2321static void
2322each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2323{
2324 if (end <= start) return;
2325 each_location(start, end - start, cb, data);
2326}
2327
2328static void
2329gc_mark_maybe_each_location(VALUE obj, void *data)
2330{
2331 gc_mark_maybe_internal(obj);
2332}
2333
2334void
2335rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2336{
2337 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2338}
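
/*
 * Editorial sketch (illustrative, not part of gc.c): conservatively
 * marking a C array of VALUEs whose slots may or may not hold live heap
 * objects; the range is [start, end), matching each_location_ptr() above.
 * mark_scratch() is hypothetical.
 */
static void
mark_scratch(const VALUE *buf, long n)
{
    rb_gc_mark_locations(buf, buf + n);
}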
2339
2340void
2341rb_gc_mark_values(long n, const VALUE *values)
2342{
2343 for (long i = 0; i < n; i++) {
2344 gc_mark_internal(values[i]);
2345 }
2346}
2347
2348void
2349rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2350{
2351 for (long i = 0; i < n; i++) {
2352 gc_mark_and_pin_internal(values[i]);
2353 }
2354}
2355
2356static int
2357mark_key(st_data_t key, st_data_t value, st_data_t data)
2358{
2359 gc_mark_and_pin_internal((VALUE)key);
2360
2361 return ST_CONTINUE;
2362}
2363
2364void
2365rb_mark_set(st_table *tbl)
2366{
2367 if (!tbl) return;
2368
2369 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2370}
2371
2372static int
2373mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2374{
2375 gc_mark_internal((VALUE)key);
2376 gc_mark_internal((VALUE)value);
2377
2378 return ST_CONTINUE;
2379}
2380
2381static int
2382pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2383{
2384 gc_mark_and_pin_internal((VALUE)key);
2385 gc_mark_and_pin_internal((VALUE)value);
2386
2387 return ST_CONTINUE;
2388}
2389
2390static int
2391pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2392{
2393 gc_mark_and_pin_internal((VALUE)key);
2394 gc_mark_internal((VALUE)value);
2395
2396 return ST_CONTINUE;
2397}
2398
2399static void
2400mark_hash(VALUE hash)
2401{
2402 if (rb_hash_compare_by_id_p(hash)) {
2403 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2404 }
2405 else {
2406 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2407 }
2408
2409 gc_mark_internal(RHASH(hash)->ifnone);
2410}
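
/*
 * Editorial note: a compare_by_identity hash keys on object addresses,
 * so letting a key move during compaction would strand it in a stale
 * bucket; its keys are therefore pinned while the values stay movable.
 */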
2411
2412void
2413rb_mark_hash(st_table *tbl)
2414{
2415 if (!tbl) return;
2416
2417 st_foreach(tbl, pin_key_pin_value, 0);
2418}
2419
2420static enum rb_id_table_iterator_result
2421mark_method_entry_i(VALUE me, void *objspace)
2422{
2423 gc_mark_internal(me);
2424
2425 return ID_TABLE_CONTINUE;
2426}
2427
2428static void
2429mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2430{
2431 if (tbl) {
2432 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2433 }
2434}
2435
2436#if STACK_GROW_DIRECTION < 0
2437#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2438#elif STACK_GROW_DIRECTION > 0
2439#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2440#else
2441#define GET_STACK_BOUNDS(start, end, appendix) \
2442 ((STACK_END < STACK_START) ? \
2443 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2444#endif
2445
2446static void
2447gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2448{
2449 gc_mark_maybe_internal(obj);
2450
2451#ifdef RUBY_ASAN_ENABLED
2452 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2453 void *fake_frame_start;
2454 void *fake_frame_end;
2455 bool is_fake_frame = asan_get_fake_stack_extents(
2456 ec->machine.asan_fake_stack_handle, obj,
2457 ec->machine.stack_start, ec->machine.stack_end,
2458 &fake_frame_start, &fake_frame_end
2459 );
2460 if (is_fake_frame) {
2461 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2462 }
2463#endif
2464}
2465
2466static VALUE
2467gc_location_internal(void *objspace, VALUE value)
2468{
2469 if (SPECIAL_CONST_P(value)) {
2470 return value;
2471 }
2472
2473 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2474
2475 return rb_gc_impl_location(objspace, value);
2476}
2477
2478VALUE
2479rb_gc_location(VALUE value)
2480{
2481 return gc_location_internal(rb_gc_get_objspace(), value);
2482}
2483
2484#if defined(__wasm__)
2485
2486
2487static VALUE *rb_stack_range_tmp[2];
2488
2489static void
2490rb_mark_locations(void *begin, void *end)
2491{
2492 rb_stack_range_tmp[0] = begin;
2493 rb_stack_range_tmp[1] = end;
2494}
2495
2496void
2497rb_gc_save_machine_context(void)
2498{
2499 // no-op
2500}
2501
2502# if defined(__EMSCRIPTEN__)
2503
2504static void
2505mark_current_machine_context(const rb_execution_context_t *ec)
2506{
2507 emscripten_scan_stack(rb_mark_locations);
2508 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2509
2510 emscripten_scan_registers(rb_mark_locations);
2511 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2512}
2513# else // use Asyncify version
2514
2515static void
2516mark_current_machine_context(const rb_execution_context_t *ec)
2517{
2518 VALUE *stack_start, *stack_end;
2519 SET_STACK_END;
2520 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2521 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2522
2523 rb_wasm_scan_locals(rb_mark_locations);
2524 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2525}
2526
2527# endif
2528
2529#else // !defined(__wasm__)
2530
2531void
2532rb_gc_save_machine_context(void)
2533{
2534 rb_thread_t *thread = GET_THREAD();
2535
2536 RB_VM_SAVE_MACHINE_CONTEXT(thread);
2537}
2538
2539
2540static void
2541mark_current_machine_context(const rb_execution_context_t *ec)
2542{
2543 rb_gc_mark_machine_context(ec);
2544}
2545#endif
2546
2547void
2548rb_gc_mark_machine_context(const rb_execution_context_t *ec)
2549{
2550 VALUE *stack_start, *stack_end;
2551
2552 GET_STACK_BOUNDS(stack_start, stack_end, 0);
2553 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
2554
2555 void *data =
2556#ifdef RUBY_ASAN_ENABLED
2557 /* gc_mark_machine_stack_location_maybe() uses data as const */
2558 (rb_execution_context_t *)ec;
2559#else
2560 NULL;
2561#endif
2562
2563 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
2564 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
2565 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
2566}
2567
2568static int
2569rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
2570{
2571 gc_mark_and_pin_internal((VALUE)value);
2572
2573 return ST_CONTINUE;
2574}
2575
2576void
2577rb_mark_tbl(st_table *tbl)
2578{
2579 if (!tbl || tbl->num_entries == 0) return;
2580
2581 st_foreach(tbl, rb_mark_tbl_i, 0);
2582}
2583
2584static void
2585gc_mark_tbl_no_pin(st_table *tbl)
2586{
2587 if (!tbl || tbl->num_entries == 0) return;
2588
2589 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
2590}
2591
2592void
2593rb_mark_tbl_no_pin(st_table *tbl)
2594{
2595 gc_mark_tbl_no_pin(tbl);
2596}
2597
2598static enum rb_id_table_iterator_result
2599mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2600{
2601 struct rb_cvar_class_tbl_entry *entry;
2602
2603 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2604
2605 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2606 gc_mark_internal((VALUE)entry->cref);
2607
2608 return ID_TABLE_CONTINUE;
2609}
2610
2611static void
2612mark_cvc_tbl(void *objspace, VALUE klass)
2613{
2614 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
2615 if (tbl) {
2616 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2617 }
2618}
2619
2620static bool
2621gc_declarative_marking_p(const rb_data_type_t *type)
2622{
2623 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
2624}
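
/*
 * Editorial sketch (illustrative, not part of gc.c): with
 * RUBY_TYPED_DECL_MARKING set, the dmark slot is reinterpreted as a
 * RUBY_REF_END-terminated list of member offsets (see
 * TYPED_DATA_REFS_OFFSET_LIST below) and no mark function runs.
 * struct sample and sample_type are hypothetical; offsetof() assumes
 * <stddef.h>.
 */
struct sample {
    VALUE name;
    VALUE items;
};

static const size_t sample_refs[] = {
    offsetof(struct sample, name),
    offsetof(struct sample, items),
    RUBY_REF_END,
};

static const rb_data_type_t sample_type = {
    .wrap_struct_name = "sample",
    .function = {
        .dmark = (RUBY_DATA_FUNC)(uintptr_t)sample_refs, /* offset list, not a function */
        .dfree = RUBY_TYPED_DEFAULT_FREE,
    },
    .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
};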
2625
2626static enum rb_id_table_iterator_result
2627mark_const_table_i(VALUE value, void *objspace)
2628{
2629 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2630
2631 gc_mark_internal(ce->value);
2632 gc_mark_internal(ce->file);
2633
2634 return ID_TABLE_CONTINUE;
2635}
2636
2637void
2638rb_gc_mark_roots(void *objspace, const char **categoryp)
2639{
2640 rb_execution_context_t *ec = GET_EC();
2641 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2642
2643#define MARK_CHECKPOINT(category) do { \
2644 if (categoryp) *categoryp = category; \
2645} while (0)
2646
2647 MARK_CHECKPOINT("vm");
2648 rb_vm_mark(vm);
2649 if (vm->self) gc_mark_internal(vm->self);
2650
2651 MARK_CHECKPOINT("end_proc");
2652 rb_mark_end_proc();
2653
2654 MARK_CHECKPOINT("global_tbl");
2655 rb_gc_mark_global_tbl();
2656
2657#if USE_YJIT
2658 void rb_yjit_root_mark(void); // in Rust
2659
2660 if (rb_yjit_enabled_p) {
2661 MARK_CHECKPOINT("YJIT");
2662 rb_yjit_root_mark();
2663 }
2664#endif
2665
2666 MARK_CHECKPOINT("machine_context");
2667 mark_current_machine_context(ec);
2668
2669 MARK_CHECKPOINT("finish");
2670
2671#undef MARK_CHECKPOINT
2672}
2673
2674#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA(d)->type->function.dmark
2675
2676void
2677rb_gc_mark_children(void *objspace, VALUE obj)
2678{
2679 if (FL_TEST(obj, FL_EXIVAR)) {
2680 rb_mark_generic_ivar(obj);
2681 }
2682
2683 switch (BUILTIN_TYPE(obj)) {
2684 case T_FLOAT:
2685 case T_BIGNUM:
2686 case T_SYMBOL:
2687 /* Not immediates, but they have no references and no singleton class.
2688 *
2689 * RSYMBOL(obj)->fstr intentionally not marked. See log for 96815f1e
2690 * ("symbol.c: remove rb_gc_mark_symbols()") */
2691 return;
2692
2693 case T_NIL:
2694 case T_FIXNUM:
2695 rb_bug("rb_gc_mark() called for broken object");
2696 break;
2697
2698 case T_NODE:
2699 UNEXPECTED_NODE(rb_gc_mark);
2700 break;
2701
2702 case T_IMEMO:
2703 rb_imemo_mark_and_move(obj, false);
2704 return;
2705
2706 default:
2707 break;
2708 }
2709
2710 gc_mark_internal(RBASIC(obj)->klass);
2711
2712 switch (BUILTIN_TYPE(obj)) {
2713 case T_CLASS:
2714 if (FL_TEST(obj, FL_SINGLETON)) {
2715 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
2716 }
2717 // Continue to the shared T_CLASS/T_MODULE
2718 case T_MODULE:
2719 if (RCLASS_SUPER(obj)) {
2720 gc_mark_internal(RCLASS_SUPER(obj));
2721 }
2722
2723 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2724 mark_cvc_tbl(objspace, obj);
2725 rb_cc_table_mark(obj);
2726 if (rb_shape_obj_too_complex(obj)) {
2727 gc_mark_tbl_no_pin((st_table *)RCLASS_IVPTR(obj));
2728 }
2729 else {
2730 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
2731 gc_mark_internal(RCLASS_IVPTR(obj)[i]);
2732 }
2733 }
2734
2735 if (RCLASS_CONST_TBL(obj)) {
2736 rb_id_table_foreach_values(RCLASS_CONST_TBL(obj), mark_const_table_i, objspace);
2737 }
2738
2739 gc_mark_internal(RCLASS_EXT(obj)->classpath);
2740 break;
2741
2742 case T_ICLASS:
2743 if (RICLASS_OWNS_M_TBL_P(obj)) {
2744 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2745 }
2746 if (RCLASS_SUPER(obj)) {
2747 gc_mark_internal(RCLASS_SUPER(obj));
2748 }
2749
2750 if (RCLASS_INCLUDER(obj)) {
2751 gc_mark_internal(RCLASS_INCLUDER(obj));
2752 }
2753 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
2754 rb_cc_table_mark(obj);
2755 break;
2756
2757 case T_ARRAY:
2758 if (ARY_SHARED_P(obj)) {
2759 VALUE root = ARY_SHARED_ROOT(obj);
2760 gc_mark_internal(root);
2761 }
2762 else {
2763 long len = RARRAY_LEN(obj);
2764 const VALUE *ptr = RARRAY_CONST_PTR(obj);
2765 for (long i = 0; i < len; i++) {
2766 gc_mark_internal(ptr[i]);
2767 }
2768 }
2769 break;
2770
2771 case T_HASH:
2772 mark_hash(obj);
2773 break;
2774
2775 case T_STRING:
2776 if (STR_SHARED_P(obj)) {
2777 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
2778 /* Embedded shared strings cannot be moved because this string
2779 * points into the slot of the shared string. There may be code
2780 * using the RSTRING_PTR on the stack, which would pin this
2781 * string but not pin the shared string, causing it to move. */
2782 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
2783 }
2784 else {
2785 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
2786 }
2787 }
2788 break;
2789
2790 case T_DATA: {
2791 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
2792
2793 if (ptr) {
2794 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
2795 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
2796
2797 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
2798 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
2799 }
2800 }
2801 else {
2802 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
2803 RTYPEDDATA(obj)->type->function.dmark :
2804 RDATA(obj)->dmark;
2805 if (mark_func) (*mark_func)(ptr);
2806 }
2807 }
2808
2809 break;
2810 }
2811
2812 case T_OBJECT: {
2813 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
2814
2815 if (rb_shape_obj_too_complex(obj)) {
2816 gc_mark_tbl_no_pin(ROBJECT_IV_HASH(obj));
2817 }
2818 else {
2819 const VALUE * const ptr = ROBJECT_IVPTR(obj);
2820
2821 uint32_t len = ROBJECT_IV_COUNT(obj);
2822 for (uint32_t i = 0; i < len; i++) {
2823 gc_mark_internal(ptr[i]);
2824 }
2825 }
2826
2827 if (shape) {
2828 VALUE klass = RBASIC_CLASS(obj);
2829
2830 // Increment max_iv_count if applicable, used to determine size pool allocation
2831 attr_index_t num_of_ivs = shape->next_iv_index;
2832 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
2833 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
2834 }
2835 }
2836
2837 break;
2838 }
2839
2840 case T_FILE:
2841 if (RFILE(obj)->fptr) {
2842 gc_mark_internal(RFILE(obj)->fptr->self);
2843 gc_mark_internal(RFILE(obj)->fptr->pathv);
2844 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
2845 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
2846 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
2847 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
2848 gc_mark_internal(RFILE(obj)->fptr->write_lock);
2849 gc_mark_internal(RFILE(obj)->fptr->timeout);
2850 }
2851 break;
2852
2853 case T_REGEXP:
2854 gc_mark_internal(RREGEXP(obj)->src);
2855 break;
2856
2857 case T_MATCH:
2858 gc_mark_internal(RMATCH(obj)->regexp);
2859 if (RMATCH(obj)->str) {
2860 gc_mark_internal(RMATCH(obj)->str);
2861 }
2862 break;
2863
2864 case T_RATIONAL:
2865 gc_mark_internal(RRATIONAL(obj)->num);
2866 gc_mark_internal(RRATIONAL(obj)->den);
2867 break;
2868
2869 case T_COMPLEX:
2870 gc_mark_internal(RCOMPLEX(obj)->real);
2871 gc_mark_internal(RCOMPLEX(obj)->imag);
2872 break;
2873
2874 case T_STRUCT: {
2875 const long len = RSTRUCT_LEN(obj);
2876 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
2877
2878 for (long i = 0; i < len; i++) {
2879 gc_mark_internal(ptr[i]);
2880 }
2881
2882 break;
2883 }
2884
2885 default:
2886 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
2887 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
2888 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
2889 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
2890 BUILTIN_TYPE(obj), (void *)obj,
2891 rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj) ? "corrupted object" : "non object");
2892 }
2893}
2894
2895size_t
2896rb_gc_obj_optimal_size(VALUE obj)
2897{
2898 switch (BUILTIN_TYPE(obj)) {
2899 case T_ARRAY:
2900 return rb_ary_size_as_embedded(obj);
2901
2902 case T_OBJECT:
2903 if (rb_shape_obj_too_complex(obj)) {
2904 return sizeof(struct RObject);
2905 }
2906 else {
2907 return rb_obj_embedded_size(ROBJECT_IV_CAPACITY(obj));
2908 }
2909
2910 case T_STRING:
2911 return rb_str_size_as_embedded(obj);
2912
2913 case T_HASH:
2914 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
2915
2916 default:
2917 return 0;
2918 }
2919}
2920
2921void
2922rb_gc_writebarrier(VALUE a, VALUE b)
2923{
2924 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
2925}
2926
2927void
2928rb_gc_writebarrier_unprotect(VALUE obj)
2929{
2930 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
2931}
2932
2933/*
2934 * remember `obj' if needed.
2935 */
2936void
2937rb_gc_writebarrier_remember(VALUE obj)
2938{
2939 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
2940}
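
/*
 * Editorial sketch (illustrative, not part of gc.c): extensions normally
 * do not call the barrier directly; RB_OBJ_WRITE(parent, slot, child)
 * performs the store and invokes rb_gc_writebarrier() so that the parent
 * remembers the new edge. struct pair and pair_set_car() are hypothetical.
 */
struct pair {
    VALUE car;
};

static void
pair_set_car(VALUE self, struct pair *p, VALUE val)
{
    RB_OBJ_WRITE(self, &p->car, val); /* store plus write barrier */
}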
2941
2942void
2943rb_gc_copy_attributes(VALUE dest, VALUE obj)
2944{
2945 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
2946}
2947
2948int
2949rb_gc_modular_gc_loaded_p(void)
2950{
2951#if USE_MODULAR_GC
2952 return rb_gc_functions.modular_gc_loaded_p;
2953#else
2954 return false;
2955#endif
2956}
2957
2958const char *
2959rb_gc_active_gc_name(void)
2960{
2961 const char *gc_name = rb_gc_impl_active_gc_name();
2962
2963 const size_t len = strlen(gc_name);
2964 if (len > RB_GC_MAX_NAME_LEN) {
2965 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
2966 RB_GC_MAX_NAME_LEN, len, gc_name);
2967 }
2968
2969 return gc_name;
2970}
2971
2972// TODO: rearchitect this function to work for a generic GC
2973size_t
2974rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
2975{
2976 return rb_gc_impl_obj_flags(rb_gc_get_objspace(), obj, flags, max);
2977}
2978
2979/* GC */
2980
2981void *
2982rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
2983{
2984 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
2985}
2986
2987void
2988rb_gc_ractor_cache_free(void *cache)
2989{
2990 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
2991}
2992
2993void
2994rb_gc_register_mark_object(VALUE obj)
2995{
2996 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
2997 return;
2998
2999 rb_vm_register_global_object(obj);
3000}
3001
3002void
3003rb_gc_register_address(VALUE *addr)
3004{
3005 rb_vm_t *vm = GET_VM();
3006
3007 VALUE obj = *addr;
3008
3009 struct global_object_list *tmp = ALLOC(struct global_object_list);
3010 tmp->next = vm->global_object_list;
3011 tmp->varptr = addr;
3012 vm->global_object_list = tmp;
3013
3014 /*
3015 * Because some C extensions have assignment-then-register bugs,
3016 * we defensively guard `obj` here so that it does not get swept in the meantime.
3017 */
3018 RB_GC_GUARD(obj);
3019 if (0 && !SPECIAL_CONST_P(obj)) {
3020 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3021 rb_obj_class(obj));
3022 rb_print_backtrace(stderr);
3023 }
3024}
3025
3026void
3027rb_gc_unregister_address(VALUE *addr)
3028{
3029 rb_vm_t *vm = GET_VM();
3030 struct global_object_list *tmp = vm->global_object_list;
3031
3032 if (tmp->varptr == addr) {
3033 vm->global_object_list = tmp->next;
3034 xfree(tmp);
3035 return;
3036 }
3037 while (tmp->next) {
3038 if (tmp->next->varptr == addr) {
3039 struct global_object_list *t = tmp->next;
3040
3041 tmp->next = tmp->next->next;
3042 xfree(t);
3043 break;
3044 }
3045 tmp = tmp->next;
3046 }
3047}
3048
3049void
3050rb_global_variable(VALUE *var)
3051{
3052 rb_gc_register_address(var);
3053}
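
/*
 * Editorial sketch (illustrative, not part of gc.c): keeping a static
 * VALUE alive from an extension's Init function. sample_cache and
 * Init_sample() are hypothetical.
 */
static VALUE sample_cache;

void
Init_sample(void)
{
    sample_cache = rb_ary_new();
    rb_gc_register_address(&sample_cache); /* rb_global_variable() is equivalent */
}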
3054
3055static VALUE
3056gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3057{
3058 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3059
3060 return Qnil;
3061}
3062
3063/*
3064 * rb_objspace_each_objects() is a special C API for walking through the
3065 * Ruby object space. It is difficult to use correctly; frankly, you
3066 * should not use it unless you have read the source of this function
3067 * and understand exactly what it does.
3068 *
3069 * 'callback' will be called several times (once per heap page in the
3070 * current implementation) with:
3071 *   vstart: a pointer to the first living object of the heap page.
3072 *   vend: a pointer just past the valid heap page area.
3073 *   stride: the distance to the next VALUE.
3074 *
3075 * If callback() returns non-zero, the iteration is stopped.
3076 *
3077 * This is a sample callback that iterates over live objects:
3078 *
3079 *   static int
3080 *   sample_callback(void *vstart, void *vend, int stride, void *data)
3081 *   {
3082 *       VALUE v = (VALUE)vstart;
3083 *       for (; v != (VALUE)vend; v += stride) {
3084 *           if (!rb_objspace_internal_object_p(v)) { // liveness check
3085 *               // do something with live object 'v'
3086 *           }
3087 *       }
3088 *       return 0; // continue the iteration
3089 *   }
3090 *
3091 * Note: 'vstart' is not the top of the heap page; it points at the
3092 * first living object so that at least one object is grasped, which
3093 * avoids GC issues. This means you cannot walk through every object
3094 * page, including pages of freed objects.
3095 *
3096 * Note: In this implementation, 'stride' equals sizeof(RVALUE).
3097 * However, variable values may be passed as 'stride' for various
3098 * reasons, so you must use 'stride' in the iteration instead of a
3099 * constant value.
3100 */
3101void
3102rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3103{
3104 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3105}
3106
3107static void
3108gc_ref_update_array(void *objspace, VALUE v)
3109{
3110 if (ARY_SHARED_P(v)) {
3111 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3112
3113 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3114
3115 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3116 // If the root is embedded and its location has changed
3117 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3118 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3119 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3120 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3121 }
3122 }
3123 else {
3124 long len = RARRAY_LEN(v);
3125
3126 if (len > 0) {
3127 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3128 for (long i = 0; i < len; i++) {
3129 UPDATE_IF_MOVED(objspace, ptr[i]);
3130 }
3131 }
3132
3133 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3134 if (rb_ary_embeddable_p(v)) {
3135 rb_ary_make_embedded(v);
3136 }
3137 }
3138 }
3139}
3140
3141static void
3142gc_ref_update_object(void *objspace, VALUE v)
3143{
3144 VALUE *ptr = ROBJECT_IVPTR(v);
3145
3146 if (rb_shape_obj_too_complex(v)) {
3147 gc_ref_update_table_values_only(ROBJECT_IV_HASH(v));
3148 return;
3149 }
3150
3151 size_t slot_size = rb_gc_obj_slot_size(v);
3152 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
3153 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
3154 // Object can be re-embedded
3155 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
3156 RB_FL_SET_RAW(v, ROBJECT_EMBED);
3157 xfree(ptr);
3158 ptr = ROBJECT(v)->as.ary;
3159 }
3160
3161 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
3162 UPDATE_IF_MOVED(objspace, ptr[i]);
3163 }
3164}
3165
3166void
3167rb_gc_ref_update_table_values_only(st_table *tbl)
3168{
3169 gc_ref_update_table_values_only(tbl);
3170}
3171
3172/* Update MOVED references in a VALUE=>VALUE st_table */
3173void
3174rb_gc_update_tbl_refs(st_table *ptr)
3175{
3176 gc_update_table_refs(ptr);
3177}
3178
3179static void
3180gc_ref_update_hash(void *objspace, VALUE v)
3181{
3182 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3183}
3184
3185static void
3186gc_update_values(void *objspace, long n, VALUE *values)
3187{
3188 for (long i = 0; i < n; i++) {
3189 UPDATE_IF_MOVED(objspace, values[i]);
3190 }
3191}
3192
3193void
3194rb_gc_update_values(long n, VALUE *values)
3195{
3196 gc_update_values(rb_gc_get_objspace(), n, values);
3197}
3198
3199static enum rb_id_table_iterator_result
3200check_id_table_move(VALUE value, void *data)
3201{
3202 void *objspace = (void *)data;
3203
3204 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3205 return ID_TABLE_REPLACE;
3206 }
3207
3208 return ID_TABLE_CONTINUE;
3209}
3210
3211void
3212rb_gc_prepare_heap_process_object(VALUE obj)
3213{
3214 switch (BUILTIN_TYPE(obj)) {
3215 case T_STRING:
3216 // Precompute the string coderange. This both saves time for when it will
3217 // eventually be needed, and avoids mutating heap pages after a potential fork.
3218 rb_enc_str_coderange(obj);
3219 break;
3220 default:
3221 break;
3222 }
3223}
3224
3225void
3226rb_gc_prepare_heap(void)
3227{
3228 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3229}
3230
3231size_t
3232rb_gc_heap_id_for_size(size_t size)
3233{
3234 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3235}
3236
3237bool
3238rb_gc_size_allocatable_p(size_t size)
3239{
3240 return rb_gc_impl_size_allocatable_p(size);
3241}
3242
3243static enum rb_id_table_iterator_result
3244update_id_table(VALUE *value, void *data, int existing)
3245{
3246 void *objspace = (void *)data;
3247
3248 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3249 *value = gc_location_internal(objspace, (VALUE)*value);
3250 }
3251
3252 return ID_TABLE_CONTINUE;
3253}
3254
3255static void
3256update_m_tbl(void *objspace, struct rb_id_table *tbl)
3257{
3258 if (tbl) {
3259 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3260 }
3261}
3262
3263static enum rb_id_table_iterator_result
3264update_cc_tbl_i(VALUE ccs_ptr, void *objspace)
3265{
3266 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3267 VM_ASSERT(vm_ccs_p(ccs));
3268
3269 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) {
3270 ccs->cme = (const rb_callable_method_entry_t *)gc_location_internal(objspace, (VALUE)ccs->cme);
3271 }
3272
3273 for (int i=0; i<ccs->len; i++) {
3274 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
3275 ccs->entries[i].cc = (struct rb_callcache *)gc_location_internal(objspace, (VALUE)ccs->entries[i].cc);
3276 }
3277 }
3278
3279 // do not replace
3280 return ID_TABLE_CONTINUE;
3281}
3282
3283static void
3284update_cc_tbl(void *objspace, VALUE klass)
3285{
3286 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
3287 if (tbl) {
3288 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
3289 }
3290}
3291
3292static enum rb_id_table_iterator_result
3293update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3294{
3295 struct rb_cvar_class_tbl_entry *entry;
3296
3297 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3298
3299 if (entry->cref) {
3300 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3301 }
3302
3303 entry->class_value = gc_location_internal(objspace, entry->class_value);
3304
3305 return ID_TABLE_CONTINUE;
3306}
3307
3308static void
3309update_cvc_tbl(void *objspace, VALUE klass)
3310{
3311 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
3312 if (tbl) {
3313 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3314 }
3315}
3316
3317static enum rb_id_table_iterator_result
3318update_const_table(VALUE value, void *objspace)
3319{
3320 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3321
3322 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3323 ce->value = gc_location_internal(objspace, ce->value);
3324 }
3325
3326 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3327 ce->file = gc_location_internal(objspace, ce->file);
3328 }
3329
3330 return ID_TABLE_CONTINUE;
3331}
3332
3333static void
3334update_const_tbl(void *objspace, struct rb_id_table *tbl)
3335{
3336 if (!tbl) return;
3337 rb_id_table_foreach_values(tbl, update_const_table, objspace);
3338}
3339
3340static void
3341update_subclass_entries(void *objspace, rb_subclass_entry_t *entry)
3342{
3343 while (entry) {
3344 UPDATE_IF_MOVED(objspace, entry->klass);
3345 entry = entry->next;
3346 }
3347}
3348
3349static void
3350update_class_ext(void *objspace, rb_classext_t *ext)
3351{
3352 UPDATE_IF_MOVED(objspace, ext->origin_);
3353 UPDATE_IF_MOVED(objspace, ext->includer);
3354 UPDATE_IF_MOVED(objspace, ext->refined_class);
3355 update_subclass_entries(objspace, ext->subclasses);
3356}
3357
3358static void
3359update_superclasses(void *objspace, VALUE obj)
3360{
3361 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3362 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
3363 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
3364 }
3365 }
3366}
3367
3368extern rb_symbols_t ruby_global_symbols;
3369#define global_symbols ruby_global_symbols
3370
3371#if USE_MODULAR_GC
3372struct global_vm_table_foreach_data {
3373 vm_table_foreach_callback_func callback;
3374 vm_table_update_callback_func update_callback;
3375 void *data;
3376};
3377
3378static int
3379vm_weak_table_foreach_key(st_data_t key, st_data_t value, st_data_t data, int error)
3380{
3381 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3382
3383 return iter_data->callback((VALUE)key, iter_data->data);
3384}
3385
3386static int
3387vm_weak_table_foreach_update_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3388{
3389 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3390
3391 return iter_data->update_callback((VALUE *)key, iter_data->data);
3392}
3393
3394static int
3395vm_weak_table_str_sym_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3396{
3397 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3398
3399 if (STATIC_SYM_P(value)) {
3400 return ST_CONTINUE;
3401 }
3402 else {
3403 return iter_data->callback((VALUE)value, iter_data->data);
3404 }
3405}
3406
3407static int
3408vm_weak_table_foreach_update_value(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3409{
3410 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3411
3412 return iter_data->update_callback((VALUE *)value, iter_data->data);
3413}
3414
3415static int
3416vm_weak_table_gen_ivar_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3417{
3418 int retval = vm_weak_table_foreach_key(key, value, data, error);
3419 if (retval == ST_DELETE) {
3420 FL_UNSET((VALUE)key, FL_EXIVAR);
3421 }
3422 return retval;
3423}
3424
3425static int
3426vm_weak_table_frozen_strings_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3427{
3428 GC_ASSERT(RB_TYPE_P((VALUE)key, T_STRING));
3429
3430 int retval = vm_weak_table_foreach_key(key, value, data, error);
3431 if (retval == ST_DELETE) {
3432 FL_UNSET((VALUE)key, RSTRING_FSTR);
3433 }
3434 return retval;
3435}
3436
3437struct st_table *rb_generic_ivtbl_get(void);
3438
3439void
3440rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
3441 vm_table_update_callback_func update_callback,
3442 void *data,
3443 enum rb_gc_vm_weak_tables table)
3444{
3445 rb_vm_t *vm = GET_VM();
3446
3447 struct global_vm_table_foreach_data foreach_data = {
3448 .callback = callback,
3449 .update_callback = update_callback,
3450 .data = data
3451 };
3452
3453 switch (table) {
3454 case RB_GC_VM_CI_TABLE: {
3455 st_foreach_with_replace(
3456 vm->ci_table,
3457 vm_weak_table_foreach_key,
3458 vm_weak_table_foreach_update_key,
3459 (st_data_t)&foreach_data
3460 );
3461 break;
3462 }
3463 case RB_GC_VM_OVERLOADED_CME_TABLE: {
3464 st_foreach_with_replace(
3465 vm->overloaded_cme_table,
3466 vm_weak_table_foreach_key,
3467 vm_weak_table_foreach_update_key,
3468 (st_data_t)&foreach_data
3469 );
3470 break;
3471 }
3472 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
3473 st_foreach_with_replace(
3474 global_symbols.str_sym,
3475 vm_weak_table_str_sym_foreach,
3476 vm_weak_table_foreach_update_value,
3477 (st_data_t)&foreach_data
3478 );
3479 break;
3480 }
3481 case RB_GC_VM_GENERIC_IV_TABLE: {
3482 st_table *generic_iv_tbl = rb_generic_ivtbl_get();
3483 st_foreach_with_replace(
3484 generic_iv_tbl,
3485 vm_weak_table_gen_ivar_foreach,
3486 vm_weak_table_foreach_update_key,
3487 (st_data_t)&foreach_data
3488 );
3489 break;
3490 }
3491 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
3492 st_table *frozen_strings = GET_VM()->frozen_strings;
3493 st_foreach_with_replace(
3494 frozen_strings,
3495 vm_weak_table_frozen_strings_foreach,
3496 vm_weak_table_foreach_update_key,
3497 (st_data_t)&foreach_data
3498 );
3499 break;
3500 }
3501 default:
3502 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
3503 }
3504}
3505#endif
3506
3507void
3508rb_gc_update_vm_references(void *objspace)
3509{
3510 rb_execution_context_t *ec = GET_EC();
3511 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3512
3513 rb_vm_update_references(vm);
3514 rb_gc_update_global_tbl();
3515 global_symbols.ids = gc_location_internal(objspace, global_symbols.ids);
3516 global_symbols.dsymbol_fstr_hash = gc_location_internal(objspace, global_symbols.dsymbol_fstr_hash);
3517 gc_update_table_refs(global_symbols.str_sym);
3518
3519#if USE_YJIT
3520 void rb_yjit_root_update_references(void); // in Rust
3521
3522 if (rb_yjit_enabled_p) {
3523 rb_yjit_root_update_references();
3524 }
3525#endif
3526}
3527
3528void
3529rb_gc_update_object_references(void *objspace, VALUE obj)
3530{
3531 if (FL_TEST(obj, FL_EXIVAR)) {
3532 rb_ref_update_generic_ivar(obj);
3533 }
3534
3535 switch (BUILTIN_TYPE(obj)) {
3536 case T_CLASS:
3537 if (FL_TEST(obj, FL_SINGLETON)) {
3538 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
3539 }
3540 // Continue to the shared T_CLASS/T_MODULE
3541 case T_MODULE:
3542 if (RCLASS_SUPER((VALUE)obj)) {
3543 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3544 }
3545 update_m_tbl(objspace, RCLASS_M_TBL(obj));
3546 update_cc_tbl(objspace, obj);
3547 update_cvc_tbl(objspace, obj);
3548 update_superclasses(objspace, obj);
3549
3550 if (rb_shape_obj_too_complex(obj)) {
3551 gc_ref_update_table_values_only(RCLASS_IV_HASH(obj));
3552 }
3553 else {
3554 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
3555 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
3556 }
3557 }
3558
3559 update_class_ext(objspace, RCLASS_EXT(obj));
3560 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
3561
3562 UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
3563 break;
3564
3565 case T_ICLASS:
3566 if (RICLASS_OWNS_M_TBL_P(obj)) {
3567 update_m_tbl(objspace, RCLASS_M_TBL(obj));
3568 }
3569 if (RCLASS_SUPER((VALUE)obj)) {
3570 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3571 }
3572 update_class_ext(objspace, RCLASS_EXT(obj));
3573 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
3574 update_cc_tbl(objspace, obj);
3575 break;
3576
3577 case T_IMEMO:
3578 rb_imemo_mark_and_move(obj, true);
3579 return;
3580
3581 case T_NIL:
3582 case T_FIXNUM:
3583 case T_NODE:
3584 case T_MOVED:
3585 case T_NONE:
3586 /* These can't move */
3587 return;
3588
3589 case T_ARRAY:
3590 gc_ref_update_array(objspace, obj);
3591 break;
3592
3593 case T_HASH:
3594 gc_ref_update_hash(objspace, obj);
3595 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
3596 break;
3597
3598 case T_STRING:
3599 {
3600 if (STR_SHARED_P(obj)) {
3601 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
3602 }
3603
3604 /* If the string is not embedded after the move and it can fit in the
3605 * slot it has been placed in, then re-embed it. */
3606 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
3607 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
3608 rb_str_make_embedded(obj);
3609 }
3610 }
3611
3612 break;
3613 }
3614 case T_DATA:
3615 /* Call the compaction callback, if it exists */
3616 {
3617 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3618 if (ptr) {
3619 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
3620 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3621
3622 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3623 VALUE *ref = (VALUE *)((char *)ptr + offset);
3624 *ref = gc_location_internal(objspace, *ref);
3625 }
3626 }
3627 else if (RTYPEDDATA_P(obj)) {
3628 RUBY_DATA_FUNC compact_func = RTYPEDDATA(obj)->type->function.dcompact;
3629 if (compact_func) (*compact_func)(ptr);
3630 }
3631 }
3632 }
3633 break;
3634
3635 case T_OBJECT:
3636 gc_ref_update_object(objspace, obj);
3637 break;
3638
3639 case T_FILE:
3640 if (RFILE(obj)->fptr) {
3641 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
3642 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
3643 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
3644 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
3645 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
3646 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
3647 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
3648 }
3649 break;
3650 case T_REGEXP:
3651 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
3652 break;
3653
3654 case T_SYMBOL:
3655 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
3656 break;
3657
3658 case T_FLOAT:
3659 case T_BIGNUM:
3660 break;
3661
3662 case T_MATCH:
3663 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
3664
3665 if (RMATCH(obj)->str) {
3666 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
3667 }
3668 break;
3669
3670 case T_RATIONAL:
3671 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
3672 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
3673 break;
3674
3675 case T_COMPLEX:
3676 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
3677 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
3678
3679 break;
3680
3681 case T_STRUCT:
3682 {
3683 long i, len = RSTRUCT_LEN(obj);
3684 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
3685
3686 for (i = 0; i < len; i++) {
3687 UPDATE_IF_MOVED(objspace, ptr[i]);
3688 }
3689 }
3690 break;
3691 default:
3692 rb_bug("unreachable");
3693 break;
3694 }
3695
3696 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
3697}
3698
3699VALUE
3700rb_gc_start(void)
3701{
3702 rb_gc();
3703 return Qnil;
3704}
3705
3706void
3707rb_gc(void)
3708{
3709 unless_objspace(objspace) { return; }
3710
3711 rb_gc_impl_start(objspace, true, true, true, false);
3712}
3713
3714int
3715rb_during_gc(void)
3716{
3717 unless_objspace(objspace) { return FALSE; }
3718
3719 return rb_gc_impl_during_gc_p(objspace);
3720}
3721
3722size_t
3723rb_gc_count(void)
3724{
3725 return rb_gc_impl_gc_count(rb_gc_get_objspace());
3726}
3727
3728static VALUE
3729gc_count(rb_execution_context_t *ec, VALUE self)
3730{
3731 return SIZET2NUM(rb_gc_count());
3732}
3733
3734VALUE
3735rb_gc_latest_gc_info(VALUE key)
3736{
3737 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
3738 rb_raise(rb_eTypeError, "non-hash or symbol given");
3739 }
3740
3741 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
3742
3743 if (val == Qundef) {
3744 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
3745 }
3746
3747 return val;
3748}
3749
3750static VALUE
3751gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
3752{
3753 if (NIL_P(arg)) {
3754 arg = rb_hash_new();
3755 }
3756 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3757 rb_raise(rb_eTypeError, "non-hash or symbol given");
3758 }
3759
3760 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3761
3762 if (ret == Qundef) {
3763 GC_ASSERT(SYMBOL_P(arg));
3764
3765 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3766 }
3767
3768 return ret;
3769}
3770
3771size_t
3772rb_gc_stat(VALUE arg)
3773{
3774 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3775 rb_raise(rb_eTypeError, "non-hash or symbol given");
3776 }
3777
3778 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3779
3780 if (ret == Qundef) {
3781 GC_ASSERT(SYMBOL_P(arg));
3782
3783 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3784 }
3785
3786 if (SYMBOL_P(arg)) {
3787 return NUM2SIZET(ret);
3788 }
3789 else {
3790 return 0;
3791 }
3792}
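
/*
 * Editorial sketch (illustrative, not part of gc.c): reading a single
 * counter from C; "count" is a key provided by the default GC (the same
 * one GC.stat reports). gc_runs() is hypothetical.
 */
static size_t
gc_runs(void)
{
    return rb_gc_stat(ID2SYM(rb_intern("count")));
}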
3793
3794static VALUE
3795gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
3796{
3797 if (NIL_P(arg)) {
3798 arg = rb_hash_new();
3799 }
3800
3801 if (NIL_P(heap_name)) {
3802 if (!RB_TYPE_P(arg, T_HASH)) {
3803 rb_raise(rb_eTypeError, "non-hash given");
3804 }
3805 }
3806 else if (FIXNUM_P(heap_name)) {
3807 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
3808 rb_raise(rb_eTypeError, "non-hash or symbol given");
3809 }
3810 }
3811 else {
3812 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
3813 }
3814
3815 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
3816
3817 if (ret == Qundef) {
3818 GC_ASSERT(SYMBOL_P(arg));
3819
3820 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3821 }
3822
3823 return ret;
3824}
3825
3826static VALUE
3827gc_config_get(rb_execution_context_t *ec, VALUE self)
3828{
3829 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
3830 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
3831
3832 return cfg_hash;
3833}
3834
3835static VALUE
3836gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
3837{
3838 void *objspace = rb_gc_get_objspace();
3839
3840 rb_gc_impl_config_set(objspace, hash);
3841
3842 return rb_gc_impl_config_get(objspace);
3843}
3844
3845static VALUE
3846gc_stress_get(rb_execution_context_t *ec, VALUE self)
3847{
3848 return rb_gc_impl_stress_get(rb_gc_get_objspace());
3849}
3850
3851static VALUE
3852gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
3853{
3854 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
3855
3856 return flag;
3857}
3858
3859void
3860rb_gc_initial_stress_set(VALUE flag)
3861{
3862 initial_stress = flag;
3863}
3864
3865size_t *
3866rb_gc_heap_sizes(void)
3867{
3868 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
3869}
3870
3871VALUE
3872rb_gc_enable(void)
3873{
3874 return rb_objspace_gc_enable(rb_gc_get_objspace());
3875}
3876
3877VALUE
3878rb_objspace_gc_enable(void *objspace)
3879{
3880 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3881 rb_gc_impl_gc_enable(objspace);
3882 return RBOOL(disabled);
3883}
3884
3885static VALUE
3886gc_enable(rb_execution_context_t *ec, VALUE _)
3887{
3888 return rb_gc_enable();
3889}
3890
3891static VALUE
3892gc_disable_no_rest(void *objspace)
3893{
3894 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3895 rb_gc_impl_gc_disable(objspace, false);
3896 return RBOOL(disabled);
3897}
3898
3899VALUE
3900rb_gc_disable_no_rest(void)
3901{
3902 return gc_disable_no_rest(rb_gc_get_objspace());
3903}
3904
3905VALUE
3906rb_gc_disable(void)
3907{
3908 return rb_objspace_gc_disable(rb_gc_get_objspace());
3909}
3910
3911VALUE
3912rb_objspace_gc_disable(void *objspace)
3913{
3914 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3915 rb_gc_impl_gc_disable(objspace, true);
3916 return RBOOL(disabled);
3917}
3918
3919static VALUE
3920gc_disable(rb_execution_context_t *ec, VALUE _)
3921{
3922 return rb_gc_disable();
3923}
3924
3925// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
3926void
3927ruby_gc_set_params(void)
3928{
3929 rb_gc_impl_set_params(rb_gc_get_objspace());
3930}
3931
3932void
3933rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
3934{
3935 RB_VM_LOCK_ENTER();
3936 {
3937 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
3938
3939 if (!RB_SPECIAL_CONST_P(obj)) {
3940 rb_vm_t *vm = GET_VM();
3941 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
3942 struct gc_mark_func_data_struct mfd = {
3943 .mark_func = func,
3944 .data = data,
3945 };
3946
3947 vm->gc.mark_func_data = &mfd;
3948 rb_gc_mark_children(rb_gc_get_objspace(), obj);
3949 vm->gc.mark_func_data = prev_mfd;
3950 }
3951 }
3952 RB_VM_LOCK_LEAVE();
3953}
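
/*
 * Editorial sketch (illustrative, not part of gc.c): counting the
 * outgoing edges of an object with the traversal hook above.
 * count_edge() and count_children() are hypothetical.
 */
static void
count_edge(VALUE child, void *data)
{
    (*(size_t *)data)++;
}

static size_t
count_children(VALUE obj)
{
    size_t n = 0;
    rb_objspace_reachable_objects_from(obj, count_edge, &n);
    return n;
}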
3954
3955struct root_objects_data {
3956 const char *category;
3957 void (*func)(const char *category, VALUE, void *);
3958 void *data;
3959};
3960
3961static void
3962root_objects_from(VALUE obj, void *ptr)
3963{
3964 const struct root_objects_data *data = (struct root_objects_data *)ptr;
3965 (*data->func)(data->category, obj, data->data);
3966}
3967
3968void
3969rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
3970{
3971 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
3972
3973 rb_vm_t *vm = GET_VM();
3974
3975 struct root_objects_data data = {
3976 .func = func,
3977 .data = passing_data,
3978 };
3979
3980 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
3981 struct gc_mark_func_data_struct mfd = {
3982 .mark_func = root_objects_from,
3983 .data = &data,
3984 };
3985
3986 vm->gc.mark_func_data = &mfd;
3987 rb_gc_save_machine_context();
3988 rb_gc_mark_roots(vm->gc.objspace, &data.category);
3989 vm->gc.mark_func_data = prev_mfd;
3990}
3991
3992/*
3993 ------------------------------ DEBUG ------------------------------
3994*/
3995
3996static const char *
3997type_name(int type, VALUE obj)
3998{
3999 switch (type) {
4000#define TYPE_NAME(t) case (t): return #t;
4001 TYPE_NAME(T_NONE);
4002 TYPE_NAME(T_OBJECT);
4003 TYPE_NAME(T_CLASS);
4004 TYPE_NAME(T_MODULE);
4005 TYPE_NAME(T_FLOAT);
4006 TYPE_NAME(T_STRING);
4007 TYPE_NAME(T_REGEXP);
4008 TYPE_NAME(T_ARRAY);
4009 TYPE_NAME(T_HASH);
4010 TYPE_NAME(T_STRUCT);
4011 TYPE_NAME(T_BIGNUM);
4012 TYPE_NAME(T_FILE);
4013 TYPE_NAME(T_MATCH);
4014 TYPE_NAME(T_COMPLEX);
4015 TYPE_NAME(T_RATIONAL);
4016 TYPE_NAME(T_NIL);
4017 TYPE_NAME(T_TRUE);
4018 TYPE_NAME(T_FALSE);
4019 TYPE_NAME(T_SYMBOL);
4020 TYPE_NAME(T_FIXNUM);
4021 TYPE_NAME(T_UNDEF);
4022 TYPE_NAME(T_IMEMO);
4023 TYPE_NAME(T_ICLASS);
4024 TYPE_NAME(T_MOVED);
4025 TYPE_NAME(T_ZOMBIE);
4026 case T_DATA:
4027 if (obj && rb_objspace_data_type_name(obj)) {
4028 return rb_objspace_data_type_name(obj);
4029 }
4030 return "T_DATA";
4031#undef TYPE_NAME
4032 }
4033 return "unknown";
4034}
4035
4036static const char *
4037obj_type_name(VALUE obj)
4038{
4039 return type_name(TYPE(obj), obj);
4040}
4041
4042const char *
4043rb_method_type_name(rb_method_type_t type)
4044{
4045 switch (type) {
4046 case VM_METHOD_TYPE_ISEQ: return "iseq";
4047 case VM_METHOD_TYPE_ATTRSET: return "attrset";
4048 case VM_METHOD_TYPE_IVAR: return "ivar";
4049 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4050 case VM_METHOD_TYPE_ALIAS: return "alias";
4051 case VM_METHOD_TYPE_REFINED: return "refined";
4052 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4053 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4054 case VM_METHOD_TYPE_MISSING: return "missing";
4055 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4056 case VM_METHOD_TYPE_UNDEF: return "undef";
4057 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4058 }
4059 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4060}
4061
4062static void
4063rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4064{
4065 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4066 VALUE path = rb_iseq_path(iseq);
4067 int n = ISEQ_BODY(iseq)->location.first_lineno;
4068 snprintf(buff, buff_size, " %s@%s:%d",
4069 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4070 RSTRING_PTR(path), n);
4071 }
4072}
4073
4074static int
4075str_len_no_raise(VALUE str)
4076{
4077 long len = RSTRING_LEN(str);
4078 if (len < 0) return 0;
4079 if (len > INT_MAX) return INT_MAX;
4080 return (int)len;
4081}
4082
4083#define BUFF_ARGS buff + pos, buff_size - pos
4084#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4085#define APPEND_S(s) do { \
4086 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4087 goto end; \
4088 } \
4089 else { \
4090 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4091 } \
4092 } while (0)
4093#define C(c, s) ((c) != 0 ? (s) : " ")
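/* Editorial note: APPEND_F/APPEND_S implement best-effort, truncating
 * formatting. snprintf() returns the length that *would* have been written,
 * so once `pos` reaches `buff_size` the macros jump to the `end` label and
 * the remaining fields are simply dropped. */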
4094
4095static size_t
4096rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4097{
4098 size_t pos = 0;
4099
4100 if (SPECIAL_CONST_P(obj)) {
4101 APPEND_F("%s", obj_type_name(obj));
4102
4103 if (FIXNUM_P(obj)) {
4104 APPEND_F(" %ld", FIX2LONG(obj));
4105 }
4106 else if (SYMBOL_P(obj)) {
4107 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4108 }
4109 }
4110 else {
4111 // const int age = RVALUE_AGE_GET(obj);
4112
4113 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4114 // TODO: fixme
4115 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4116 // (void *)obj, age,
4117 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4118 // C(RVALUE_MARK_BITMAP(obj), "M"),
4119 // C(RVALUE_PIN_BITMAP(obj), "P"),
4120 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4121 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4122 // C(rb_objspace_garbage_object_p(obj), "G"),
4123 // obj_type_name(obj));
4124 }
4125 else {
4126 /* fake */
4127 // APPEND_F("%p [%dXXXX] %s",
4128 // (void *)obj, age,
4129 // obj_type_name(obj));
4130 }
4131
4132 if (internal_object_p(obj)) {
4133 /* ignore */
4134 }
4135 else if (RBASIC(obj)->klass == 0) {
4136 APPEND_S("(temporary internal)");
4137 }
4138 else if (RTEST(RBASIC(obj)->klass)) {
4139 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4140 if (!NIL_P(class_path)) {
4141 APPEND_F("(%s)", RSTRING_PTR(class_path));
4142 }
4143 }
4144 }
4145 end:
4146
4147 return pos;
4148}
4149
4150const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4151
4152static size_t
4153rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4154{
4155 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4156 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4157
4158 switch (type) {
4159 case T_NODE:
4160 UNEXPECTED_NODE(rb_raw_obj_info);
4161 break;
4162 case T_ARRAY:
4163 if (ARY_SHARED_P(obj)) {
4164 APPEND_S("shared -> ");
4165 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4166 }
4167 else if (ARY_EMBED_P(obj)) {
4168 APPEND_F("[%s%s] len: %ld (embed)",
4169 C(ARY_EMBED_P(obj), "E"),
4170 C(ARY_SHARED_P(obj), "S"),
4171 RARRAY_LEN(obj));
4172 }
4173 else {
4174 APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
4175 C(ARY_EMBED_P(obj), "E"),
4176 C(ARY_SHARED_P(obj), "S"),
4177 RARRAY_LEN(obj),
4178 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
4179 (void *)RARRAY_CONST_PTR(obj));
4180 }
4181 break;
4182 case T_STRING: {
4183 if (STR_SHARED_P(obj)) {
4184 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4185 }
4186 else {
4187 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4188
4189 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4190 }
4191 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4192 break;
4193 }
4194 case T_SYMBOL: {
4195 VALUE fstr = RSYMBOL(obj)->fstr;
4196 ID id = RSYMBOL(obj)->id;
4197 if (RB_TYPE_P(fstr, T_STRING)) {
4198 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4199 }
4200 else {
4201 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4202 }
4203 break;
4204 }
4205 case T_MOVED: {
4206 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4207 break;
4208 }
4209 case T_HASH: {
4210 APPEND_F("[%c] %"PRIdSIZE,
4211 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4212 RHASH_SIZE(obj));
4213 break;
4214 }
4215 case T_CLASS:
4216 case T_MODULE:
4217 {
4218 VALUE class_path = rb_class_path_cached(obj);
4219 if (!NIL_P(class_path)) {
4220 APPEND_F("%s", RSTRING_PTR(class_path));
4221 }
4222 else {
4223 APPEND_S("(anon)");
4224 }
4225 break;
4226 }
4227 case T_ICLASS:
4228 {
4229 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4230 if (!NIL_P(class_path)) {
4231 APPEND_F("src:%s", RSTRING_PTR(class_path));
4232 }
4233 break;
4234 }
4235 case T_OBJECT:
4236 {
4237 if (rb_shape_obj_too_complex(obj)) {
4238 size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
4239 APPEND_F("(too_complex) len:%zu", hash_len);
4240 }
4241 else {
4242 uint32_t len = ROBJECT_IV_CAPACITY(obj);
4243
4244 if (RBASIC(obj)->flags & ROBJECT_EMBED) {
4245 APPEND_F("(embed) len:%d", len);
4246 }
4247 else {
4248 VALUE *ptr = ROBJECT_IVPTR(obj);
4249 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
4250 }
4251 }
4252 }
4253 break;
4254 case T_DATA: {
4255 const struct rb_block *block;
4256 const rb_iseq_t *iseq;
4257 if (rb_obj_is_proc(obj) &&
4258 (block = vm_proc_block(obj)) != NULL &&
4259 (vm_block_type(block) == block_type_iseq) &&
4260 (iseq = vm_block_iseq(block)) != NULL) {
4261 rb_raw_iseq_info(BUFF_ARGS, iseq);
4262 }
4263 else if (rb_ractor_p(obj)) {
4264 rb_ractor_t *r = (void *)DATA_PTR(obj);
4265 if (r) {
4266 APPEND_F("r:%d", r->pub.id);
4267 }
4268 }
4269 else {
4270 const char * const type_name = rb_objspace_data_type_name(obj);
4271 if (type_name) {
4272 APPEND_F("%s", type_name);
4273 }
4274 }
4275 break;
4276 }
4277 case T_IMEMO: {
4278 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4279
4280 switch (imemo_type(obj)) {
4281 case imemo_ment:
4282 {
4283 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4284
4285 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4286 rb_id2name(me->called_id),
4287 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4288 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4289 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4290 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4291 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4292 me->def ? rb_method_type_name(me->def->type) : "NULL",
4293 me->def ? me->def->aliased : -1,
4294 (void *)me->owner, // obj_info(me->owner),
4295 (void *)me->defined_class); //obj_info(me->defined_class)));
4296
4297 if (me->def) {
4298 switch (me->def->type) {
4299 case VM_METHOD_TYPE_ISEQ:
4300 APPEND_S(" (iseq:");
4301 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4302 APPEND_S(")");
4303 break;
4304 default:
4305 break;
4306 }
4307 }
4308
4309 break;
4310 }
4311 case imemo_iseq: {
4312 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4313 rb_raw_iseq_info(BUFF_ARGS, iseq);
4314 break;
4315 }
4316 case imemo_callinfo:
4317 {
4318 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4319 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4320 rb_id2name(vm_ci_mid(ci)),
4321 vm_ci_flag(ci),
4322 vm_ci_argc(ci),
4323 vm_ci_kwarg(ci) ? "available" : "NULL");
4324 break;
4325 }
4326 case imemo_callcache:
4327 {
4328 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4329 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
4330 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4331
4332 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4333 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4334 cme ? rb_id2name(cme->called_id) : "<NULL>",
4335 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4336 (void *)cme,
4337 (void *)(uintptr_t)vm_cc_call(cc));
4338 break;
4339 }
4340 default:
4341 break;
4342 }
4343 }
4344 default:
4345 break;
4346 }
4347 }
4348 end:
4349
4350 return pos;
4351}
4352
4353#undef C
4354
4355void
4356rb_asan_poison_object(VALUE obj)
4357{
4358 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4359 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
4360}
4361
4362void
4363rb_asan_unpoison_object(VALUE obj, bool newobj_p)
4364{
4365 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4366 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
4367}
4368
4369void *
4370rb_asan_poisoned_object_p(VALUE obj)
4371{
4372 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4373 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
4374}
4375
4376#define asan_unpoisoning_object(obj) \
4377 for (void *poisoned = asan_unpoison_object_temporary(obj), \
4378 *unpoisoning = &poisoned; /* flag to loop just once */ \
4379 unpoisoning; \
4380 unpoisoning = asan_poison_object_restore(obj, poisoned))
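/* Editorial note: the single-iteration for-loop above lets a block run with
 * `obj` temporarily unpoisoned, restoring the previous ASan poison state on
 * exit, e.g.:
 *
 *   asan_unpoisoning_object(obj) {
 *       inspect_object(obj);  // hypothetical helper; obj is readable here
 *   }
 */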
4381
4382const char *
4383rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4384{
4385 asan_unpoisoning_object(obj) {
4386 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
4387 pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
4388 if (pos >= buff_size) {} // truncated
4389 }
4390
4391 return buff;
4392}
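/* Editorial sketch: plausible (version-dependent) output of rb_raw_obj_info
 * for a short String; the exact layout follows the APPEND_F calls above, and
 * the capacity shown here is made up.
 *
 *   char buff[0x100];
 *   rb_raw_obj_info(buff, sizeof(buff), rb_str_new_cstr("hi"));
 *   // e.g. => "(String) [embed] len: 2, capa: 15 \"hi\""
 */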
4393
4394#undef APPEND_S
4395#undef APPEND_F
4396#undef BUFF_ARGS
4397
4398#if RGENGC_OBJ_INFO
4399#define OBJ_INFO_BUFFERS_NUM 10
4400#define OBJ_INFO_BUFFERS_SIZE 0x100
4401static rb_atomic_t obj_info_buffers_index = 0;
4402static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
4403
4404/* Atomically increments *var, resetting it to 0 when maxval is reached.
4405 * Returns the previous value of *var, reduced into (0...maxval). */
4406static rb_atomic_t
4407atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
4408{
4409 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
4410 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
4411 const rb_atomic_t newval = oldval + 1;
4412 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
4413 oldval %= maxval;
4414 }
4415 return oldval;
4416}
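/* Editorial sketch: with maxval == 4, successive calls return 0,1,2,3,0,1,...
 * A lost CAS race only means another thread has already reset the shared
 * counter; the modulo above keeps every return value in range either way.
 *
 *   static rb_atomic_t idx = 0;
 *   rb_atomic_t slot = atomic_inc_wraparound(&idx, 4);
 */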
4417
4418static const char *
4419obj_info(VALUE obj)
4420{
4421 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
4422 char *const buff = obj_info_buffers[index];
4423 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
4424}
4425#else
4426static const char *
4427obj_info(VALUE obj)
4428{
4429 return obj_type_name(obj);
4430}
4431#endif
4432
4433/*
4434 ------------------------ Extended allocator ------------------------
4435*/
4436
4437struct gc_raise_tag {
4438 VALUE exc;
4439 const char *fmt;
4440 va_list *ap;
4441};
4442
4443static void *
4444gc_vraise(void *ptr)
4445{
4446 struct gc_raise_tag *argv = ptr;
4447 rb_vraise(argv->exc, argv->fmt, *argv->ap);
4448 UNREACHABLE_RETURN(NULL);
4449}
4450
4451static void
4452gc_raise(VALUE exc, const char *fmt, ...)
4453{
4454 va_list ap;
4455 va_start(ap, fmt);
4456 struct gc_raise_tag argv = {
4457 exc, fmt, &ap,
4458 };
4459
4460 if (ruby_thread_has_gvl_p()) {
4461 gc_vraise(&argv);
4462 UNREACHABLE;
4463 }
4464 else if (ruby_native_thread_p()) {
4465 rb_thread_call_with_gvl(gc_vraise, &argv);
4466 UNREACHABLE;
4467 }
4468 else {
4469 /* Not in a ruby thread */
4470 fprintf(stderr, "%s", "[FATAL] ");
4471 vfprintf(stderr, fmt, ap);
4472 }
4473
4474 va_end(ap);
4475 abort();
4476}
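/* Editorial note: gc_raise() has to work from three contexts: a Ruby thread
 * holding the GVL (raise directly), a Ruby-registered native thread without
 * the GVL (re-acquire it first), and a foreign thread, where there is no Ruby
 * stack to unwind, so the message is printed and abort() runs. In the first
 * two cases the raise does not return, so abort() is never reached. */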
4477
4478NORETURN(static void negative_size_allocation_error(const char *));
4479static void
4480negative_size_allocation_error(const char *msg)
4481{
4482 gc_raise(rb_eNoMemError, "%s", msg);
4483}
4484
4485static void *
4486ruby_memerror_body(void *dummy)
4487{
4488 rb_memerror();
4489 return 0;
4490}
4491
4492NORETURN(static void ruby_memerror(void));
4493RBIMPL_ATTR_MAYBE_UNUSED()
4494static void
4495ruby_memerror(void)
4496{
4497 if (ruby_thread_has_gvl_p()) {
4498 rb_memerror();
4499 }
4500 else {
4501 if (ruby_native_thread_p()) {
4502 rb_thread_call_with_gvl(ruby_memerror_body, 0);
4503 }
4504 else {
4505 /* no ruby thread */
4506 fprintf(stderr, "[FATAL] failed to allocate memory\n");
4507 }
4508 }
4509
4510 /* There is an ongoing discussion about whether we should die here; */
4511 /* we may revisit this decision later. */
4512 exit(EXIT_FAILURE);
4513}
4514
4515void
4516rb_memerror(void)
4517{
4518 /* the `GET_VM()->special_exceptions` below assumes that
4519 * the VM is reachable from the current thread. We should
4520 * definitely make sure of that. */
4521 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
4522
4523 rb_execution_context_t *ec = GET_EC();
4524 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
4525
4526 if (!exc ||
4527 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
4528 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
4529 fprintf(stderr, "[FATAL] failed to allocate memory\n");
4530 exit(EXIT_FAILURE);
4531 }
4532 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
4533 rb_ec_raised_clear(ec);
4534 }
4535 else {
4536 rb_ec_raised_set(ec, RAISED_NOMEMORY);
4537 exc = ruby_vm_special_exception_copy(exc);
4538 }
4539 ec->errinfo = exc;
4540 EC_JUMP_TAG(ec, TAG_RAISE);
4541}
4542
4543bool
4544rb_memerror_reentered(void)
4545{
4546 rb_execution_context_t *ec = GET_EC();
4547 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
4548}
4549
4550void
4551rb_malloc_info_show_results(void)
4552{
4553}
4554
4555static void *
4556handle_malloc_failure(void *ptr)
4557{
4558 if (LIKELY(ptr)) {
4559 return ptr;
4560 }
4561 else {
4562 ruby_memerror();
4563 UNREACHABLE_RETURN(ptr);
4564 }
4565}
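/* Editorial note: the allocator entry points below follow one pattern: a
 * `_body` function performs the raw allocation and may return NULL, and the
 * public wrapper funnels that result through handle_malloc_failure() so a
 * failed allocation raises NoMemoryError (or exits when no Ruby thread is
 * available) instead of returning NULL to the caller. */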
4566
4567static void *ruby_xmalloc_body(size_t size);
4568
4569void *
4570ruby_xmalloc(size_t size)
4571{
4572 return handle_malloc_failure(ruby_xmalloc_body(size));
4573}
4574
4575static void *
4576ruby_xmalloc_body(size_t size)
4577{
4578 if ((ssize_t)size < 0) {
4579 negative_size_allocation_error("too large allocation size");
4580 }
4581
4582 return rb_gc_impl_malloc(rb_gc_get_objspace(), size);
4583}
4584
4585void
4586ruby_malloc_size_overflow(size_t count, size_t elsize)
4587{
4588 rb_raise(rb_eArgError,
4589 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
4590 count, elsize);
4591}
4592
4593static void *ruby_xmalloc2_body(size_t n, size_t size);
4594
4595void *
4596ruby_xmalloc2(size_t n, size_t size)
4597{
4598 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
4599}
4600
4601static void *
4602ruby_xmalloc2_body(size_t n, size_t size)
4603{
4604 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
4605}
4606
4607static void *ruby_xcalloc_body(size_t n, size_t size);
4608
4609void *
4610ruby_xcalloc(size_t n, size_t size)
4611{
4612 return handle_malloc_failure(ruby_xcalloc_body(n, size));
4613}
4614
4615static void *
4616ruby_xcalloc_body(size_t n, size_t size)
4617{
4618 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
4619}
4620
4621static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
4622
4623#ifdef ruby_sized_xrealloc
4624#undef ruby_sized_xrealloc
4625#endif
4626void *
4627ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
4628{
4629 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
4630}
4631
4632static void *
4633ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
4634{
4635 if ((ssize_t)new_size < 0) {
4636 negative_size_allocation_error("too large allocation size");
4637 }
4638
4639 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size);
4640}
4641
4642void *
4643ruby_xrealloc(void *ptr, size_t new_size)
4644{
4645 return ruby_sized_xrealloc(ptr, new_size, 0);
4646}
4647
4648static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
4649
4650#ifdef ruby_sized_xrealloc2
4651#undef ruby_sized_xrealloc2
4652#endif
4653void *
4654ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
4655{
4656 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
4657}
4658
4659static void *
4660ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
4661{
4662 size_t len = xmalloc2_size(n, size);
4663 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size);
4664}
4665
4666void *
4667ruby_xrealloc2(void *ptr, size_t n, size_t size)
4668{
4669 return ruby_sized_xrealloc2(ptr, n, size, 0);
4670}
4671
4672#ifdef ruby_sized_xfree
4673#undef ruby_sized_xfree
4674#endif
4675void
4676ruby_sized_xfree(void *x, size_t size)
4677{
4678 if (LIKELY(x)) {
4679 /* It's possible for a C extension's pthread destructor function, set by pthread_key_create,
4680 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to ruby_mimfree in
4681 * that case. */
4682 if (LIKELY(GET_VM())) {
4683 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
4684 }
4685 else {
4686 ruby_mimfree(x);
4687 }
4688 }
4689}
4690
4691void
4692ruby_xfree(void *x)
4693{
4694 ruby_sized_xfree(x, 0);
4695}
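/* Editorial sketch: the sized variants let callers that track their own
 * buffer sizes keep the GC's malloc accounting exact; passing 0 for the old
 * size (as ruby_xfree/ruby_xrealloc do) is safe, just less precise.
 *
 *   char *buf = ruby_xmalloc(100);
 *   buf = ruby_sized_xrealloc(buf, 200, 100);
 *   ruby_sized_xfree(buf, 200);
 */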
4696
4697void *
4698rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
4699{
4700 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4701 return ruby_xmalloc(w);
4702}
4703
4704void *
4705rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
4706{
4707 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4708 return ruby_xcalloc(w, 1);
4709}
4710
4711void *
4712rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
4713{
4714 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4715 return ruby_xrealloc((void *)p, w);
4716}
4717
4718void *
4719rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
4720{
4721 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
4722 return ruby_xmalloc(u);
4723}
4724
4725void *
4726rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
4727{
4728 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
4729 return ruby_xcalloc(u, 1);
4730}
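/* Editorial sketch: the *_mul_add helpers compute x * y + z with overflow
 * checks, raising ArgError instead of allocating a wrapped-around size. A
 * typical use is a header followed by n fixed-size slots (the struct names
 * here are hypothetical):
 *
 *   struct buf *p = rb_xmalloc_mul_add(n, sizeof(struct slot),
 *                                      sizeof(struct buf));
 */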
4731
4732/* Mimics ruby_xmalloc, but does not require rb_objspace.
4733 * Returns a pointer suitable for ruby_xfree.
4734 */
4735void *
4736ruby_mimmalloc(size_t size)
4737{
4738 void *mem;
4739#if CALC_EXACT_MALLOC_SIZE
4740 size += sizeof(struct malloc_obj_info);
4741#endif
4742 mem = malloc(size);
4743#if CALC_EXACT_MALLOC_SIZE
4744 if (!mem) {
4745 return NULL;
4746 }
4747 else
4748 /* record size 0 so allocated_size/allocations stay consistent */
4749 {
4750 struct malloc_obj_info *info = mem;
4751 info->size = 0;
4752 mem = info + 1;
4753 }
4754#endif
4755 return mem;
4756}
4757
4758void *
4759ruby_mimcalloc(size_t num, size_t size)
4760{
4761 void *mem;
4762#if CALC_EXACT_MALLOC_SIZE
4763 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
4764 if (UNLIKELY(t.left)) {
4765 return NULL;
4766 }
4767 size = t.right + sizeof(struct malloc_obj_info);
4768 mem = calloc1(size);
4769 if (!mem) {
4770 return NULL;
4771 }
4772 else
4773 /* record size 0 so allocated_size/allocations stay consistent */
4774 {
4775 struct malloc_obj_info *info = mem;
4776 info->size = 0;
4777 mem = info + 1;
4778 }
4779#else
4780 mem = calloc(num, size);
4781#endif
4782 return mem;
4783}
4784
4785void
4786ruby_mimfree(void *ptr)
4787{
4788#if CALC_EXACT_MALLOC_SIZE
4789 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
4790 ptr = info;
4791#endif
4792 free(ptr);
4793}
4794
4795void
4796rb_gc_adjust_memory_usage(ssize_t diff)
4797{
4798 unless_objspace(objspace) { return; }
4799
4800 rb_gc_impl_adjust_memory_usage(objspace, diff);
4801}
4802
4803const char *
4804rb_obj_info(VALUE obj)
4805{
4806 return obj_info(obj);
4807}
4808
4809void
4810rb_obj_info_dump(VALUE obj)
4811{
4812 char buff[0x100];
4813 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
4814}
4815
4816void
4817rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
4818{
4819 char buff[0x100];
4820 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
4821}
4822
4823void
4824rb_gc_before_fork(void)
4825{
4826 rb_gc_impl_before_fork(rb_gc_get_objspace());
4827}
4828
4829void
4830rb_gc_after_fork(rb_pid_t pid)
4831{
4832 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
4833}
4834
4835/*
4836 * Document-module: ObjectSpace
4837 *
4838 * The ObjectSpace module contains a number of routines
4839 * that interact with the garbage collection facility and allow you to
4840 * traverse all living objects with an iterator.
4841 *
4842 * ObjectSpace also provides support for object finalizers: procs that are
4843 * called after a specific object has been destroyed by garbage collection. See
4844 * the documentation for +ObjectSpace.define_finalizer+ for important
4845 * information on how to use this method correctly.
4846 *
4847 * a = "A"
4848 * b = "B"
4849 *
4850 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
4851 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
4852 *
4853 * a = nil
4854 * b = nil
4855 *
4856 * _produces:_
4857 *
4858 * Finalizer two on 537763470
4859 * Finalizer one on 537763480
4860 */
4861
4862/* Document-class: GC::Profiler
4863 *
4864 * The GC profiler provides access to information on GC runs including time,
4865 * length and object space size.
4866 *
4867 * Example:
4868 *
4869 * GC::Profiler.enable
4870 *
4871 * require 'rdoc/rdoc'
4872 *
4873 * GC::Profiler.report
4874 *
4875 * GC::Profiler.disable
4876 *
4877 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
4878 */
4879
4880#include "gc.rbinc"
4881
4882void
4883Init_GC(void)
4884{
4885#undef rb_intern
4886 malloc_offset = gc_compute_malloc_offset();
4887
4888 rb_mGC = rb_define_module("GC");
4889
4890 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
4891
4892 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
4893
4894 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
4895 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
4896
4897 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
4898
4899 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
4900
4901 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
4902 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
4903
4904 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
4905
4906 rb_gc_impl_init();
4907}
4908
4909// Set a name for the anonymous virtual memory area. `addr` is the starting
4910// address of the area and `size` is its length in bytes. `name` is a
4911// NUL-terminated human-readable string.
4912//
4913// This function is usually called after calling `mmap()`. The human-readable
4914// annotation helps developers identify the call site of `mmap()` that created
4915// the memory mapping.
4916//
4917// This function currently only works on Linux 5.17 or higher. After calling
4918// this function, we can see annotations in the form of "[anon:...]" in
4919// `/proc/self/maps`, where `...` is the content of `name`. This function has
4920// no effect when called on other platforms.
4921void
4922ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
4923{
4924#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
4925 // The name length cannot exceed 80 (including the '\0').
4926 RUBY_ASSERT(strlen(name) < 80);
4927 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
4928 // We ignore errors from prctl. prctl may set errno to EINVAL for several
4929 // reasons:
4930 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
4931 // 2. addr is an invalid address.
4932 // 3. The string pointed to by name is too long.
4933 // The first error indicates that PR_SET_VMA_ANON_NAME is not available, and
4934 // may happen if we run the compiled binary on an old kernel. In theory, all
4935 // other errors should result in a failure. But since EINVAL cannot
4936 // distinguish the first cause from the others, and this function is mainly
4937 // used for debugging, we silently ignore the error.
4938 errno = 0;
4939#endif
4940}
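/* Editorial sketch: annotating an anonymous mapping right after mmap(2); the
 * name then shows up as "[anon:Ruby:example]" in /proc/self/maps on Linux
 * >= 5.17 (the tag string is made up for this example).
 *
 *   void *mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *   if (mem != MAP_FAILED) ruby_annotate_mmap(mem, 4096, "Ruby:example");
 */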