Lines matching refs:th — references to the symbol th (the current rb_thread_t pointer) in CRuby's Windows thread layer, thread_win32.c.

22 #define remove_signal_thread_list(th)
26 static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
96 gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
99 if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
109 gvl_yield(rb_vm_t *vm, rb_thread_t *th)
111 gvl_release(th->vm);
113 gvl_acquire(vm, th);
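
The three fragments above are the Windows GVL hooks: gvl_yield drops the lock, lets another thread get scheduled, and takes the lock back (lines 111-113 in the listing). Below is a minimal, illustrative sketch of that release/yield/reacquire pattern using a plain CRITICAL_SECTION; the *_sketch names are invented here, and the real lock lives inside the rb_vm_t.

    #include <windows.h>

    static CRITICAL_SECTION gvl;   /* stands in for the per-VM lock */

    static void gvl_acquire_sketch(void) { EnterCriticalSection(&gvl); }
    static void gvl_release_sketch(void) { LeaveCriticalSection(&gvl); }

    /* yield = release, let another thread grab the lock, reacquire */
    static void gvl_yield_sketch(void)
    {
        gvl_release_sketch();
        Sleep(0);                  /* ~ native_thread_yield() */
        gvl_acquire_sketch();
    }

    int main(void)
    {
        InitializeCriticalSection(&gvl);
        gvl_acquire_sketch();
        gvl_yield_sketch();
        gvl_release_sketch();
        DeleteCriticalSection(&gvl);
        return 0;
    }
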
144 ruby_thread_set_native(rb_thread_t *th)
146 return TlsSetValue(ruby_native_thread_key, th);
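
ruby_thread_set_native stashes the rb_thread_t pointer in a Win32 TLS slot (TlsSetValue) so GET_THREAD can recover the current thread from any C frame. A standalone sketch of the same TlsAlloc/TlsSetValue/TlsGetValue round trip; the key name and the fake payload are illustrative:

    #include <windows.h>
    #include <stdio.h>

    static DWORD native_key;               /* ~ ruby_native_thread_key */

    int main(void)
    {
        int fake_thread_object = 42;       /* stand-in for rb_thread_t */

        native_key = TlsAlloc();           /* allocate one slot per process */
        TlsSetValue(native_key, &fake_thread_object);
        printf("%d\n", *(int *)TlsGetValue(native_key));  /* prints 42 */
        TlsFree(native_key);
        return 0;
    }
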
152 rb_thread_t *th = GET_THREAD();
155 ruby_thread_set_native(th);
159 &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
161 th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
163 thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
164 th, GET_THREAD()->thread_id,
165 th->native_thread_data.interrupt_event);
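
Init_native_thread (lines 152-165) needs a real, waitable handle for the booting thread, but GetCurrentThread() only returns a pseudo-handle, so it calls DuplicateHandle; it also creates the manual-reset interrupt_event used throughout the rest of the listing. The two Win32 calls in isolation, outside of CRuby:

    #include <windows.h>

    int main(void)
    {
        HANDLE thread_id, interrupt_event;

        /* GetCurrentThread() is only a pseudo-handle; duplicate it into a
           real handle that other threads can wait on or query */
        DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                        GetCurrentProcess(), &thread_id,
                        0, FALSE, DUPLICATE_SAME_ACCESS);

        /* manual-reset (TRUE), initially non-signaled (FALSE) */
        interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

        CloseHandle(interrupt_event);
        CloseHandle(thread_id);
        return 0;
    }
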
185 w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
191 thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
192 events, count, timeout, th);
193 if (th && (intr = th->native_thread_data.interrupt_event)) {
194 gvl_acquire(th->vm, th);
195 if (intr == th->native_thread_data.interrupt_event) {
197 if (RUBY_VM_INTERRUPTED(th)) {
207 gvl_release(th->vm);
214 if (ret == (DWORD)(WAIT_OBJECT_0 + count - 1) && th) {
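
w32_wait_events appends the thread's interrupt_event to the caller's handle array, so every blocking wait also wakes on interruption, and a result of WAIT_OBJECT_0 + count - 1 identifies the interrupt as the waker (line 214); the full function additionally reacquires the GVL and consults RUBY_VM_INTERRUPTED (lines 194-197). A self-contained sketch of the append-and-wait idiom, under an invented name wait_with_interrupt:

    #include <windows.h>
    #include <stdio.h>

    /* wait on `count` handles plus one extra interrupt event;
       count must stay below MAXIMUM_WAIT_OBJECTS */
    static DWORD wait_with_interrupt(HANDLE *events, int count,
                                     DWORD timeout, HANDLE intr)
    {
        HANDLE targets[MAXIMUM_WAIT_OBJECTS];
        int i;
        DWORD ret;

        for (i = 0; i < count; i++) targets[i] = events[i];
        targets[count++] = intr;               /* the interrupt slot is last */

        ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
        if (ret == (DWORD)(WAIT_OBJECT_0 + count - 1))
            printf("woken by the interrupt event\n");
        return ret;
    }

    int main(void)
    {
        HANDLE intr = CreateEvent(0, TRUE, FALSE, 0);
        SetEvent(intr);                        /* simulate a pending interrupt */
        wait_with_interrupt(0, 0, 1000, intr); /* ~ w32_wait_events(0,0,msec,th) */
        CloseHandle(intr);
        return 0;
    }
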
302 native_sleep(rb_thread_t *th, struct timeval *tv)
317 native_mutex_lock(&th->interrupt_lock);
318 th->unblock.func = ubf_handle;
319 th->unblock.arg = th;
320 native_mutex_unlock(&th->interrupt_lock);
322 if (RUBY_VM_INTERRUPTED(th)) {
327 ret = w32_wait_events(0, 0, msec, th);
331 native_mutex_lock(&th->interrupt_lock);
332 th->unblock.func = 0;
333 th->unblock.arg = 0;
334 native_mutex_unlock(&th->interrupt_lock);
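
native_sleep registers an unblock function (ubf_handle, with th as its argument) under th->interrupt_lock, re-checks RUBY_VM_INTERRUPTED to close the race with an interrupt that landed before registration, sleeps via w32_wait_events(0, 0, msec, th), then deregisters. A sketch of that publish/re-check/wait/deregister choreography with plain Win32 primitives; all names here are illustrative, and the second thread that would invoke the unblock function is omitted:

    #include <windows.h>

    static CRITICAL_SECTION interrupt_lock;
    static void (*unblock_func)(void *);
    static void *unblock_arg;
    static volatile LONG interrupt_flag;
    static HANDLE interrupt_event;             /* manual-reset event */

    static void ubf_wake(void *arg) { (void)arg; SetEvent(interrupt_event); }

    static void sleep_interruptibly(DWORD msec)
    {
        /* publish the unblock function under the lock */
        EnterCriticalSection(&interrupt_lock);
        unblock_func = ubf_wake;               /* ~ th->unblock.func = ubf_handle */
        unblock_arg  = 0;
        LeaveCriticalSection(&interrupt_lock);

        /* re-check after publishing, so an interrupt that arrived just
           before registration is not slept through */
        if (!InterlockedCompareExchange(&interrupt_flag, 0, 0))
            WaitForSingleObject(interrupt_event, msec);

        /* deregister under the same lock */
        EnterCriticalSection(&interrupt_lock);
        unblock_func = 0;
        unblock_arg  = 0;
        LeaveCriticalSection(&interrupt_lock);
    }

    int main(void)
    {
        InitializeCriticalSection(&interrupt_lock);
        interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
        sleep_interruptibly(100);              /* no interrupt: 100 ms timeout */
        return 0;
    }
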
579 native_thread_init_stack(rb_thread_t *th)
592 th->machine_stack_start = (VALUE *)end - 1;
593 th->machine_stack_maxsize = size - space;
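
native_thread_init_stack derives machine_stack_start and machine_stack_maxsize from the current stack region; on Windows that region can be discovered by calling VirtualQuery on any stack address (the output struct's own address works as the probe). A minimal version of the probe; the `space` term subtracted at line 593 is a guard margin whose exact size is not visible in these matches:

    #include <windows.h>
    #include <stdio.h>

    int main(void)
    {
        MEMORY_BASIC_INFORMATION mi;
        char *base, *end;
        size_t size;

        /* &mi is itself on the stack, so it serves as the probe address */
        VirtualQuery(&mi, &mi, sizeof(mi));
        base = (char *)mi.AllocationBase;
        end  = (char *)mi.BaseAddress + mi.RegionSize;
        size = end - base;
        printf("stack region: base=%p end=%p size=%lu\n",
               base, end, (unsigned long)size);
        return 0;
    }
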
601 native_thread_destroy(rb_thread_t *th)
603 HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
604 thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
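
native_thread_destroy swaps the interrupt_event handle with 0 atomically (InterlockedExchangePointer), so a concurrent ubf_handle can never SetEvent a handle that has already been closed. The swap-then-close pattern in isolation:

    #include <windows.h>

    static PVOID volatile interrupt_event;  /* shared with a potential waker */

    int main(void)
    {
        interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

        /* atomically take ownership; any racer that reads the slot
           afterwards sees 0 and skips the dead handle */
        HANDLE intr = InterlockedExchangePointer(&interrupt_event, 0);
        if (intr) CloseHandle(intr);
        return 0;
    }
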
611 rb_thread_t *th = th_ptr;
612 volatile HANDLE thread_id = th->thread_id;
614 native_thread_init_stack(th);
615 th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
618 thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
619 th->thread_id, th->native_thread_data.interrupt_event);
621 thread_start_func_2(th, th->machine_stack_start, rb_ia64_bsp());
624 thread_debug("thread deleted (th: %p)\n", th);
629 native_thread_create(rb_thread_t *th)
632 th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
634 if ((th->thread_id) == 0) {
638 w32_resume_thread(th->thread_id);
642 thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %"PRIdSIZE"\n",
643 th, th->thread_id,
644 th->native_thread_data.interrupt_event, stack_size);
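
thread_start_func_1 (lines 611-624) is the native entry point: it fixes up the stack bounds, creates the per-thread interrupt_event, and calls into thread_start_func_2; native_thread_create apparently starts the thread suspended, as the w32_resume_thread call at line 638 suggests, so the 0-check at line 634 can still bail out safely before anything runs. A standalone sketch with _beginthreadex and CREATE_SUSPENDED; start_func plays the role of thread_start_func_1, and the final WaitForSingleObject on the thread handle is also the essence of native_thread_join just below:

    #include <windows.h>
    #include <process.h>
    #include <stdio.h>

    static unsigned __stdcall start_func(void *arg)
    {
        printf("hello from thread, arg=%p\n", arg);
        return 0;
    }

    int main(void)
    {
        /* create suspended so the parent can finish bookkeeping first */
        HANDLE th = (HANDLE)_beginthreadex(0, 0 /* default stack size */,
                                           start_func, (void *)0x2a,
                                           CREATE_SUSPENDED, 0);
        if (th == 0) return 1;             /* creation failed */

        ResumeThread(th);                  /* ~ w32_resume_thread */
        WaitForSingleObject(th, INFINITE); /* join: handle signals on exit */
        CloseHandle(th);
        return 0;
    }
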
650 native_thread_join(HANDLE th)
652 w32_wait_events(&th, 1, INFINITE, 0);
658 native_thread_apply_priority(rb_thread_t *th)
660 int priority = th->priority;
661 if (th->priority > 0) {
664 else if (th->priority < 0) {
671 SetThreadPriority(th->thread_id, priority);
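
native_thread_apply_priority maps Ruby's signed priority onto a Win32 priority class and applies it with SetThreadPriority. The branch bodies at lines 661-664 are elided from the matches, so the constants chosen below are an assumption about what they select:

    #include <windows.h>

    /* map a signed priority to a Win32 thread priority class */
    static void apply_priority(HANDLE thread, int prio)
    {
        int priority;
        if (prio > 0)      priority = THREAD_PRIORITY_ABOVE_NORMAL;
        else if (prio < 0) priority = THREAD_PRIORITY_BELOW_NORMAL;
        else               priority = THREAD_PRIORITY_NORMAL;
        SetThreadPriority(thread, priority);
    }

    int main(void)
    {
        apply_priority(GetCurrentThread(), 1);  /* bump ourselves up */
        return 0;
    }
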
679 native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
694 return rb_w32_select_with_thread(n, r, w, e, timeout, th);
699 rb_w32_check_interrupt(rb_thread_t *th)
701 return w32_wait_events(0, 0, 0, th);
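
rb_w32_check_interrupt is w32_wait_events with a zero timeout: a wait that cannot block, i.e. a poll of the interrupt state. The same trick with a bare event handle:

    #include <windows.h>
    #include <stdio.h>

    /* a zero-timeout wait never blocks: WAIT_OBJECT_0 iff already signaled */
    static int interrupt_pending(HANDLE intr)
    {
        return WaitForSingleObject(intr, 0) == WAIT_OBJECT_0;
    }

    int main(void)
    {
        HANDLE intr = CreateEvent(0, TRUE, FALSE, 0);
        printf("%d\n", interrupt_pending(intr));   /* 0: not signaled */
        SetEvent(intr);
        printf("%d\n", interrupt_pending(intr));   /* 1: signaled */
        CloseHandle(intr);
        return 0;
    }
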
707 rb_thread_t *th = (rb_thread_t *)ptr;
708 thread_debug("ubf_handle: %p\n", th);
710 w32_set_event(th->native_thread_data.interrupt_event);
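
ubf_handle, the unblock function registered by native_sleep, wakes the target thread by signaling its interrupt_event; because the event is manual-reset (the TRUE at lines 161 and 615), it stays signaled until explicitly reset, so a wake posted slightly early is not lost. A two-thread demonstration of that wake-up path; names are illustrative:

    #include <windows.h>
    #include <process.h>
    #include <stdio.h>

    static HANDLE interrupt_event;

    static unsigned __stdcall sleeper(void *arg)
    {
        (void)arg;
        /* blocks until another thread signals the event */
        WaitForSingleObject(interrupt_event, INFINITE);
        printf("woken\n");
        return 0;
    }

    int main(void)
    {
        interrupt_event = CreateEvent(0, TRUE, FALSE, 0);  /* manual-reset */
        HANDLE th = (HANDLE)_beginthreadex(0, 0, sleeper, 0, 0, 0);

        Sleep(100);                 /* let the sleeper block */
        SetEvent(interrupt_event);  /* ~ w32_set_event(...interrupt_event) */

        WaitForSingleObject(th, INFINITE);
        CloseHandle(th);
        CloseHandle(interrupt_event);
        return 0;
    }
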
774 rb_thread_t *th = GET_THREAD();
775 if (!rb_thread_raised_p(th, RAISED_STACKOVERFLOW)) {
776 rb_thread_raised_set(th, RAISED_STACKOVERFLOW);
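
The last two matches guard stack-overflow handling: the RAISED_STACKOVERFLOW bit is tested and then set, so a second overflow while the first is being raised becomes a no-op instead of recursing. A toy version of that test-then-set flag; the flag value and field name here are invented:

    #include <stdio.h>

    enum { RAISED_STACKOVERFLOW = 1 };     /* illustrative flag value */
    static int raised_flags;               /* ~ th->raised_flag */

    /* test-then-set: a second overflow during handling becomes a no-op */
    static int mark_stackoverflow_once(void)
    {
        if (raised_flags & RAISED_STACKOVERFLOW) return 0;
        raised_flags |= RAISED_STACKOVERFLOW;
        return 1;
    }

    int main(void)
    {
        printf("%d %d\n", mark_stackoverflow_once(),
                          mark_stackoverflow_once());  /* prints "1 0" */
        return 0;
    }
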