#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);
static int native_mutex_trylock(pthread_mutex_t *lock);
static void native_mutex_initialize(pthread_mutex_t *lock);
static void native_mutex_destroy(pthread_mutex_t *lock);

static pthread_t timer_thread_id;
#define RB_CONDATTR_CLOCK_MONOTONIC 1

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && defined(HAVE_CLOCK_GETTIME)
#define USE_MONOTONIC_COND 1
#else
#define USE_MONOTONIC_COND 0
#endif
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}

static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    /* ... if no other thread is waiting for the GVL, just yield the CPU ... */
    native_mutex_unlock(&vm->gvl.lock);
    sched_yield();
    native_mutex_lock(&vm->gvl.lock);
    /* ... */
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_init(rb_vm_t *vm)
{
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    /* ... */
}

static void
gvl_destroy(rb_vm_t *vm)
{
    /* ... */
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}
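/*
 * Illustrative sketch (not part of the original file): a blocking region
 * releases the GVL around a slow system call and reacquires it before
 * touching any VM state again, so other Ruby threads can run meanwhile.
 * `do_blocking_read' is a hypothetical stand-in for a real call site.
 */
#if 0
static void
do_blocking_read(rb_vm_t *vm, rb_thread_t *th, int fd, char *buf, size_t len)
{
    gvl_release(vm);          /* let other Ruby threads run */
    read(fd, buf, len);       /* blocking call made without the GVL */
    gvl_acquire(vm, th);      /* must hold the GVL before touching VM state */
}
#endif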
#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, pthread_mutex_t *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, (void *)lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}
static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) { rb_bug_errno("pthread_mutex_lock", r); }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) { rb_bug_errno("pthread_mutex_unlock", r); }
}

static int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) return EBUSY;
        rb_bug_errno("pthread_mutex_trylock", r);
    }
    return 0;
}

static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) { rb_bug_errno("pthread_mutex_init", r); }
}

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) { rb_bug_errno("pthread_mutex_destroy", r); }
}
static void
native_cond_initialize(rb_thread_cond_t *cond, int flags)
{
    int r;
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);

#if USE_MONOTONIC_COND
    cond->clockid = CLOCK_REALTIME;
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        if (r == 0) {
            cond->clockid = CLOCK_MONOTONIC;
        }
    }
#endif

    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
    if (r != 0) { rb_bug_errno("pthread_cond_init", r); }
}
static void
native_cond_destroy(rb_thread_cond_t *cond)
{
    int r = pthread_cond_destroy(&cond->cond);
    if (r != 0) { rb_bug_errno("pthread_cond_destroy", r); }
}

/* Mac OS X 10.7 can return EAGAIN from these calls, hence the retry loops. */
static void
native_cond_signal(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN);
}

static void
native_cond_broadcast(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(&cond->cond);
    } while (r == EAGAIN);
}

static void
native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(&cond->cond, mutex);
    if (r != 0) { rb_bug_errno("pthread_cond_wait", r); }
}

/* EINTR may be returned by old LinuxThreads implementations; retry. */
static int
native_cond_timedwait(rb_thread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r;
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);
    return r;
}
#if SIZEOF_TIME_T == SIZEOF_LONG
typedef unsigned long unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_INT
typedef unsigned int unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
typedef unsigned LONG_LONG unsigned_time_t;
#else
# error cannot find an integer type of the same size as time_t.
#endif

#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
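/*
 * How TIMET_MAX works: `~(time_t)0 <= 0' tests whether time_t is signed.
 * If it is, the all-ones bit pattern shifted right by one gives the
 * maximum signed value; otherwise the all-ones pattern itself is the
 * maximum.  Worked through for a signed 32-bit time_t:
 *
 *   ~(unsigned_time_t)0 == 0xffffffff
 *   0xffffffff >> 1     == 0x7fffffff   (i.e. the maximum of int32_t)
 */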
static struct timespec
native_cond_timeout(rb_thread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timespec timeout;
    struct timespec now;

#if USE_MONOTONIC_COND
    if (cond->clockid == CLOCK_MONOTONIC) {
        ret = clock_gettime(cond->clockid, &now);
        if (ret != 0)
            rb_sys_fail("clock_gettime()");
        goto out;
    }

    if (cond->clockid != CLOCK_REALTIME)
        rb_bug("unsupported clockid %d", cond->clockid);
#endif

    {
        struct timeval tv;
        ret = gettimeofday(&tv, 0);
        if (ret != 0)
            rb_sys_fail("gettimeofday()");
        now.tv_sec = tv.tv_sec;
        now.tv_nsec = tv.tv_usec * 1000;
    }

#if USE_MONOTONIC_COND
  out:
#endif
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec) /* overflow */
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
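/*
 * Illustrative sketch (not in the original file): waiting on a native
 * condition variable for up to 1.5 seconds.  The relative timeout is
 * converted once to an absolute timespec against the clock the condvar
 * was initialized with, so the deadline survives spurious wakeups and,
 * with CLOCK_MONOTONIC, wall-clock adjustments.  `demo_lock', `demo_cond'
 * and `flag' are hypothetical and assumed to be set up elsewhere.
 */
#if 0
static void
wait_up_to_1500ms(void)
{
    struct timespec rel = { 1, 500 * 1000 * 1000 };  /* 1.5s, relative */
    struct timespec abs = native_cond_timeout(&demo_cond, rel);

    native_mutex_lock(&demo_lock);
    while (!flag) {
        if (native_cond_timedwait(&demo_cond, &demo_lock, &abs) == ETIMEDOUT)
            break;  /* deadline reached; abs stays valid across retries */
    }
    native_mutex_unlock(&demo_lock);
}
#endif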
#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop  pthread_cleanup_pop
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif
#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
#define USE_SIGNAL_THREAD_LIST 1
#endif
#ifdef USE_SIGNAL_THREAD_LIST
static void add_signal_thread_list(rb_thread_t *th);
static void remove_signal_thread_list(rb_thread_t *th);
static rb_thread_lock_t signal_thread_list_lock;
#endif
static pthread_key_t ruby_native_thread_key;

static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}

static void native_thread_init(rb_thread_t *th);

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    pthread_key_create(&ruby_native_thread_key, NULL);
    th->thread_id = pthread_self();
    native_thread_init(th);
#ifdef USE_SIGNAL_THREAD_LIST
    native_mutex_initialize(&signal_thread_list_lock);
#endif
    /* ... */
}

static void
native_thread_init(rb_thread_t *th)
{
    /* ... */
    ruby_thread_set_native(th);
}
#define USE_THREAD_CACHE 0

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void);
#endif
#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#endif
#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of the current thread's stack.
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* Mac OS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stacksize;
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif
static struct {
    pthread_t id;
    VALUE *stack_start;
    size_t stack_maxsize;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;
#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

#undef ruby_init_stack
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
        size_t size = 0;
        size_t space = 0;
#if defined(STACKADDR_AVAILABLE)
        void *stackaddr;
        get_stack(&stackaddr, &size);
        space = STACK_DIR_UPPER((char *)addr - (char *)stackaddr, (char *)stackaddr - (char *)addr);
#elif defined(HAVE_GETRLIMIT)
        struct rlimit rlim;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        space = size > 5 * 1024 * 1024 ? 1024 * 1024 : size / 5;
#endif
        native_main_thread.stack_maxsize = size - space;
    }
}
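/*
 * Worked example of the sizing above (hypothetical limits): with the
 * common 8 MB RLIMIT_STACK, size exceeds 5 MB, so space is capped at
 * 1 MB and stack_maxsize becomes 7 MB.  With a 1 MB limit, space is
 * size / 5, about 205 KB, leaving roughly 820 KB of usable stack.
 */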
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_thread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine_stack_start = native_main_thread.stack_start;
        th->machine_stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine_stack_start = start;
            th->machine_stack_maxsize = size;
        }
#endif
    }
#ifdef __ia64
    th->machine_register_stack_start = native_main_thread.register_stack_start;
    /* ... */
#endif
    return 0;
}
#ifndef __CYGWIN__
#define USE_NATIVE_THREAD_INIT 1
#endif

static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif
        native_thread_init(th);
        /* run */
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->machine_stack_start, rb_ia64_bsp());
#else
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif
    }
#if USE_THREAD_CACHE
    if (1) {
        /* cache this thread and wait for the next request */
        rb_thread_t *th;
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}
struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    rb_thread_cond_t *cond;
    struct cached_thread_entry *next;
};

#if USE_THREAD_CACHE
static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
struct cached_thread_entry *cached_thread_root;

static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_thread_cond_t cond = { PTHREAD_COND_INITIALIZER, };
    volatile rb_thread_t *th_area = 0;
    struct cached_thread_entry *entry =
      (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    struct timeval tv;
    struct timespec ts;
    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        native_cond_timedwait(&cond, &thread_cache_lock, &ts);

        {
            /* unlink the entry regardless of whether we were woken or timed out */
            struct cached_thread_entry *e = cached_thread_root;
            struct cached_thread_entry *prev = cached_thread_root;

            while (e) {
                if (e == entry) {
                    if (prev == cached_thread_root) {
                        cached_thread_root = e->next;
                    }
                    else {
                        prev->next = e->next;
                    }
                    break;
                }
                prev = e;
                e = e->next;
            }
        }

        free(entry);
        native_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
#endif
static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        {
            if (cached_thread_root) {
                cached_thread_root = entry->next;
                *entry->th_area = th;
                result = 1;
            }
        }
        if (result) {
            native_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}
enum {
#ifdef __SYMBIAN32__
    RUBY_STACK_MIN_LIMIT = 64 * 1024,  /* 64KB: be more frugal on mobile platforms */
#else
    RUBY_STACK_MIN_LIMIT = 512 * 1024, /* 512KB */
#endif
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024
};

#ifdef PTHREAD_STACK_MIN
#define RUBY_STACK_MIN ((RUBY_STACK_MIN_LIMIT < PTHREAD_STACK_MIN) ? \
                        PTHREAD_STACK_MIN * 2 : RUBY_STACK_MIN_LIMIT)
#else
#define RUBY_STACK_MIN (RUBY_STACK_MIN_LIMIT)
#endif
#define RUBY_STACK_SPACE (RUBY_STACK_MIN/5 > RUBY_STACK_SPACE_LIMIT ? \
                          RUBY_STACK_SPACE_LIMIT : RUBY_STACK_MIN/5)
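/*
 * Worked example of the macros above (assuming PTHREAD_STACK_MIN is
 * smaller than 512 KB, as on typical glibc systems):
 *
 *   RUBY_STACK_MIN   = 512 * 1024 = 524288 bytes
 *   RUBY_STACK_MIN/5 = 104857 bytes (about 102 KB, below the 1 MB cap)
 *   RUBY_STACK_SPACE = 104857 bytes
 *
 * so a new thread gets a 512 KB stack with roughly 100 KB held back as
 * a safety margin for stack-overflow detection.
 */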
static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
        pthread_attr_t attr;
        const size_t stack_size = RUBY_STACK_MIN;
        const size_t space = RUBY_STACK_SPACE;

        th->machine_stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine_register_stack_maxsize = th->machine_stack_maxsize/2;
        th->machine_stack_maxsize /= 2;
#endif

        CHECK_ERR(pthread_attr_init(&attr));

#ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
#endif

#ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
#endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
        thread_debug("create: %p (%d)\n", (void *)th, err);
        CHECK_ERR(pthread_attr_destroy(&attr));
    }
    return err;
}
static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
}
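/*
 * Note: Ruby threads are created detached (PTHREAD_CREATE_DETACHED in
 * native_thread_create above), so the VM never joins them.  This join
 * helper exists for the timer thread, which is joined during shutdown
 * in native_stop_timer_thread below.
 */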
#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;

    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    /* clamp the requested priority into the range the policy supports */
    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#endif
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}
static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    pthread_mutex_t *lock = &th->interrupt_lock;
    rb_thread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /* Solaris cond_timedwait() returns EINVAL if the timeout exceeds
         * current_time + 100,000,000 seconds, so cap it there.  The early
         * return is treated as a spurious wakeup; callers of native_sleep
         * must cope with spurious wakeups anyway. */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted; return immediately */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();
}
#ifdef USE_SIGNAL_THREAD_LIST
struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};

#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
        body; \
    } \
    native_mutex_unlock(lock); \
} while (0)

#if 0 /* for debug */
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
      signal_thread_list_anchor.next;
    thread_debug("list (%s)> ", str);
    while (list) {
        thread_debug("%p (%p), ", list->th, list->th->thread_id);
        list = list->next;
    }
    thread_debug("\n");
}
#endif
static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(EXIT_FAILURE);
            }

            list->th = th;
            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}

static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              (struct signal_thread_list *)
                th->native_thread_data.signal_thread_list;

            list->prev->next = list->next;
            if (list->next) {
                list->next->prev = list->prev;
            }
            th->native_thread_data.signal_thread_list = 0;
            free(list);
        });
    }
}
static void
ubf_select_each(rb_thread_t *th)
{
    thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
    if (th) {
        pthread_kill(th->thread_id, SIGVTALRM);
    }
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    add_signal_thread_list(th);
    if (pthread_self() != timer_thread_id)
        rb_thread_wakeup_timer_thread(); /* activate timer thread */
    ubf_select_each(th);
}

static void
ping_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list;

            list = signal_thread_list_anchor.next;
            while (list) {
                ubf_select_each(list->th);
                list = list->next;
            }
        });
    }
}

static int
check_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next)
        return 1;
    else
        return 0;
}
#else /* ! USE_SIGNAL_THREAD_LIST */
static void add_signal_thread_list(rb_thread_t *th) { }
static void remove_signal_thread_list(rb_thread_t *th) { }
#define ubf_select 0
static void ping_signal_thread_list(void) { return; }
static int check_signal_thread_list(void) { return 0; }
#endif /* USE_SIGNAL_THREAD_LIST */
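/*
 * ubf_select() is the "unblock function" used while a thread is blocked
 * in select(2): the caller puts the target thread on the signal thread
 * list and sends SIGVTALRM.  Because a single signal can race with the
 * target entering select(), the timer thread keeps re-signalling every
 * tick (ping_signal_thread_list) until the target leaves the blocking
 * region and removes itself from the list.
 */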
static int timer_thread_pipe[2] = {-1, -1};
static int timer_thread_pipe_owner_process;

#define TT_DEBUG 0
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

/* This function may be called from a signal handler, so it must use only
 * async-signal-safe functions (write, getpid). */
void
rb_thread_wakeup_timer_thread(void)
{
    ssize_t result;

    /* already opened */
    if (timer_thread_pipe_owner_process == getpid()) {
        const char *buff = "!";
      retry:
        if ((result = write(timer_thread_pipe[1], buff, 1)) <= 0) {
            switch (errno) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                rb_async_bug_errno("rb_thread_wakeup_timer_thread - write", errno);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
}
static void
consume_communication_pipe(void)
{
#define CCP_READ_BUFF_SIZE 1024
    /* the buffer can be shared; nothing reads its contents */
    static char buff[CCP_READ_BUFF_SIZE];
    ssize_t result;

  retry:
    result = read(timer_thread_pipe[0], buff, CCP_READ_BUFF_SIZE);
    if (result < 0) {
        switch (errno) {
          case EINTR: goto retry;
          default:
            rb_async_bug_errno("consume_communication_pipe: read", errno);
        }
    }
}
static void
close_communication_pipe(void)
{
    if (close(timer_thread_pipe[0]) < 0) {
        rb_bug_errno("close_communication_pipe", errno);
    }
    if (close(timer_thread_pipe[1]) < 0) {
        rb_bug_errno("close_communication_pipe", errno);
    }
    timer_thread_pipe[0] = timer_thread_pipe[1] = -1;
}
/* 100ms.  10ms is too small for user-level thread scheduling
 * on recent Linux. */
#define TIME_QUANTUM_USEC (100 * 1000)

static void *
thread_timer(void *p)
{
    rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;
    int result;
    struct timeval timeout;

    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

    while (system_working > 0) {
        fd_set rfds;
        int need_polling;

        /* timer function */
        ping_signal_thread_list();
        timer_thread_function(0);
        need_polling = check_signal_thread_list();

        /* wait */
        FD_ZERO(&rfds);
        FD_SET(timer_thread_pipe[0], &rfds);

        if (gvl->waiting > 0 || need_polling) {
            /* polling (TIME_QUANTUM_USEC usec) */
            timeout.tv_sec = 0;
            timeout.tv_usec = TIME_QUANTUM_USEC;
            result = select(timer_thread_pipe[0] + 1, &rfds, 0, 0, &timeout);
        }
        else {
            /* wait (infinite) */
            result = select(timer_thread_pipe[0] + 1, &rfds, 0, 0, 0);
        }

        if (result == 0) {
            /* maybe timeout */
        }
        else if (result > 0) {
            consume_communication_pipe();
        }
        else { /* result < 0 */
            /* ... ignore EINTR and friends, abort on anything else ... */
        }
    }

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}
static void
rb_thread_create_timer_thread(void)
{
    rb_enable_interrupt();

    if (!timer_thread_id) {
        pthread_attr_t attr;
        int err;

        pthread_attr_init(&attr);
#ifdef PTHREAD_STACK_MIN
        if (PTHREAD_STACK_MIN < 4096 * 3) {
            /* Allocate at least 12KB (3 pages) for the timer thread's
             * machine stack; PTHREAD_STACK_MIN alone can overflow it. */
            pthread_attr_setstacksize(&attr,
                                      4096 * 3 + (THREAD_DEBUG ? BUFSIZ : 0));
        }
        else {
            pthread_attr_setstacksize(&attr,
                                      PTHREAD_STACK_MIN + (THREAD_DEBUG ? BUFSIZ : 0));
        }
#endif

        /* communication pipe with timer thread and signal handler */
        if (timer_thread_pipe_owner_process != getpid()) {
            if (timer_thread_pipe[0] != -1) {
                /* close the pipe inherited from the parent process */
                close_communication_pipe();
            }

            err = pipe(timer_thread_pipe);
            if (err != 0) {
                rb_bug_errno("thread_timer: Failed to create communication pipe for timer thread", errno);
            }
            rb_update_max_fd(timer_thread_pipe[0]);
            rb_update_max_fd(timer_thread_pipe[1]);
#if defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL)
            {
                int oflags;
#if defined(O_NONBLOCK)
                oflags = fcntl(timer_thread_pipe[1], F_GETFL);
                oflags |= O_NONBLOCK;
                fcntl(timer_thread_pipe[1], F_SETFL, oflags);
#endif /* O_NONBLOCK */
#if defined(FD_CLOEXEC)
                oflags = fcntl(timer_thread_pipe[0], F_GETFD);
                fcntl(timer_thread_pipe[0], F_SETFD, oflags | FD_CLOEXEC);
                oflags = fcntl(timer_thread_pipe[1], F_GETFD);
                fcntl(timer_thread_pipe[1], F_SETFD, oflags | FD_CLOEXEC);
#endif /* FD_CLOEXEC */
            }
#endif /* HAVE_FCNTL */

            /* validate pipe on this process */
            timer_thread_pipe_owner_process = getpid();
        }

        /* create timer thread */
        if (timer_thread_id) {
            rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
        }
        err = pthread_create(&timer_thread_id, &attr, thread_timer, &GET_VM()->gvl);
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to create timer thread (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
        pthread_attr_destroy(&attr);
    }

    rb_disable_interrupt(); /* only the timer thread receives signals */
}
static int
native_stop_timer_thread(int close_anyway)
{
    int stopped;
    stopped = --system_working <= 0;

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    if (stopped) {
        /* join */
        rb_thread_wakeup_timer_thread();
        native_thread_join(timer_thread_id);
        if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
        timer_thread_id = 0;
        if (close_anyway) {
            close_communication_pipe();
        }
    }
    return stopped;
}
static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}
#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

    if (th) {
        size = th->machine_stack_maxsize;
        base = (char *)th->machine_stack_start - STACK_DIR_UPPER(0, size);
    }
#ifdef STACKADDR_AVAILABLE
    else if (get_stack(&base, &size) == 0) {
        STACK_DIR_UPPER((void)(base = (char *)base + size), (void)0);
    }
#endif
    else {
        return 0;
    }

    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif
int
rb_reserved_fd_p(int fd)
{
    if (fd == timer_thread_pipe[0] ||
        fd == timer_thread_pipe[1]) {
        return 1;
    }
    else {
        return 0;
    }
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
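/*
 * Illustrative use of rb_reserved_fd_p (not from the original file):
 * code that walks and closes inherited descriptors, e.g. after fork,
 * should skip the fds reserved here, or it would break the timer
 * thread's pipe.  `max_fd' is a hypothetical upper bound:
 *
 *   for (fd = 3; fd < max_fd; fd++) {
 *       if (rb_reserved_fd_p(fd)) continue;  // VM-internal, leave open
 *       close(fd);
 *   }
 */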