Lines Matching defs:tl in /macosx-10.5.8/xnu-1228.15.4/bsd/kern/

133 static void wq_runitem(proc_t p, user_addr_t item, thread_t th, struct threadlist *tl,
135 static int setup_wqthread(proc_t p, thread_t th, user_addr_t item, int reuse_thread, struct threadlist *tl);
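
Taken together, the matches below touch every field of struct threadlist. For orientation, those fields can be collected into one sketch; this is a reconstruction from the match lines, not the file's own definition, and the field types and ordering are assumptions.

    /* Reconstructed from the matches below; types and order are assumptions. */
    struct threadlist {
        TAILQ_ENTRY(threadlist) th_entry;        /* linkage on wq_thrunlist / wq_thidlelist */
        thread_t                th_thread;       /* the Mach thread backing this entry */
        struct workqueue       *th_workq;        /* owning workqueue */
        mach_vm_offset_t        th_stackaddr;    /* base of the stack/guard/pthread allocation */
        mach_vm_size_t          th_allocsize;    /* STACKSIZE + GUARDSIZE + p->p_pthsize (line 1373) */
        void                   *th_thport;       /* send right copied out at line 1407 */
        int                     th_flags;        /* TH_LIST_INITED/SUSPENDED/BLOCKED/RUNNING */
        int                     th_affinity_tag; /* index into per-tag idle/active bookkeeping */
        int                     th_unparked;     /* unpark credits, see lines 1270-1271 and 1872 */
    };
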
1221 struct threadlist *tl;
1225 tl = uth->uu_threadlist;
1226 wq = tl->th_workq;
1234 old_activecount = OSAddAtomic(-1, (SInt32 *)&wq->wq_thactivecount[tl->th_affinity_tag]);
1247 if ((tl->th_flags & TH_LIST_RUNNING) &&
1248 TAILQ_EMPTY(&wq->wq_thidlelist[tl->th_affinity_tag]))
1257 KERNEL_DEBUG(0xefffd020, (int)thread, wq->wq_threads_scheduled, tl->th_affinity_tag, 0, 0);
1270 if (tl->th_unparked)
1271 OSAddAtomic(-1, (SInt32 *)&tl->th_unparked);
1273 OSAddAtomic(1, (SInt32 *)&wq->wq_thactivecount[tl->th_affinity_tag]);
1275 KERNEL_DEBUG(0xefffd024, (int)thread, wq->wq_threads_scheduled, tl->th_affinity_tag, 0, 0);
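
Lines 1221-1275 are the scheduler callback for a workqueue thread: blocking atomically decrements the active count for the thread's affinity tag, and unblocking re-increments it after consuming any pending th_unparked credit. Lines 1247-1248 additionally check whether the idle list for that tag is empty before reacting, which the sketch below omits. The function name and the SCHED_CALL_* event constants here are assumptions:

    /* Sketch of the block/unblock accounting at lines 1234 and 1270-1273. */
    static void
    wq_sched_callback(int type, thread_t thread)
    {
        struct uthread    *uth = get_bsdthread_info(thread);
        struct threadlist *tl  = uth->uu_threadlist;
        struct workqueue  *wq  = tl->th_workq;

        switch (type) {
        case SCHED_CALL_BLOCK:
            /* one fewer active thread on this affinity tag (line 1234) */
            OSAddAtomic(-1, (SInt32 *)&wq->wq_thactivecount[tl->th_affinity_tag]);
            break;
        case SCHED_CALL_UNBLOCK:
            /* consume a pending unpark credit, then count as active again
             * (lines 1270-1273) */
            if (tl->th_unparked)
                OSAddAtomic(-1, (SInt32 *)&tl->th_unparked);
            OSAddAtomic(1, (SInt32 *)&wq->wq_thactivecount[tl->th_affinity_tag]);
            break;
        }
    }
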
1283 struct threadlist *tl;
1286 tl = NULL;
1305 tl = TAILQ_FIRST(&wq->wq_thidlelist[affinity_tag]);
1306 TAILQ_REMOVE(&wq->wq_thidlelist[affinity_tag], tl, th_entry);
1316 if (tl != NULL) {
1317 thread_sched_call(tl->th_thread, NULL);
1319 if ( (tl->th_flags & TH_LIST_BLOCKED) )
1320 wakeup(tl);
1328 (void)mach_vm_deallocate(wq->wq_map, tl->th_stackaddr, tl->th_allocsize);
1329 (void)mach_port_deallocate(get_task_ipcspace(wq->wq_task), (mach_port_name_t)tl->th_thport);
1331 thread_terminate(tl->th_thread);
1333 KERNEL_DEBUG(0xefffd030, (int)tl->th_thread, wq->wq_nthreads, tl->th_flags & TH_LIST_BLOCKED, 0, 0);
1337 thread_deallocate(tl->th_thread);
1339 kfree(tl, sizeof(struct threadlist));
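
Lines 1283-1339 pull a thread off an idle list and release its resources. The gap between lines 1320 and 1328 suggests the reclamation sits on the opposite branch from the wakeup; the branch structure in this sketch is an assumption, the calls themselves are from the matches:

    if (tl != NULL) {
        thread_sched_call(tl->th_thread, NULL);        /* detach the sched callback (1317) */
        if (tl->th_flags & TH_LIST_BLOCKED) {
            wakeup(tl);                                /* parked thread runs and exits itself (1320) */
        } else {
            /* presumably a thread that never ran: reclaim its stack,
             * port right, and the thread itself directly */
            (void)mach_vm_deallocate(wq->wq_map, tl->th_stackaddr, tl->th_allocsize);  /* (1328) */
            (void)mach_port_deallocate(get_task_ipcspace(wq->wq_task),
                                       (mach_port_name_t)tl->th_thport);               /* (1329) */
            thread_terminate(tl->th_thread);           /* (1331) */
        }
        thread_deallocate(tl->th_thread);              /* drop the creation reference (1337) */
        kfree(tl, sizeof(struct threadlist));          /* (1339) */
    }
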
1347 struct threadlist *tl;
1363 tl = kalloc(sizeof(struct threadlist));
1364 bzero(tl, sizeof(struct threadlist));
1373 tl->th_allocsize = PTH_DEFAULT_STACKSIZE + PTH_DEFAULT_GUARDSIZE + p->p_pthsize;
1376 tl->th_allocsize,
1384 &stackaddr, tl->th_allocsize,
1395 (void) mach_vm_deallocate(wq->wq_map, stackaddr, tl->th_allocsize);
1400 kfree(tl, sizeof(struct threadlist));
1407 tl->th_thport = (void *)ipc_port_copyout_send(sright, get_task_ipcspace(wq->wq_task));
1418 tl->th_flags = TH_LIST_INITED | TH_LIST_SUSPENDED;
1420 tl->th_thread = th;
1421 tl->th_workq = wq;
1422 tl->th_stackaddr = stackaddr;
1423 tl->th_affinity_tag = affinity_tag;
1427 thread_set_cthreadself(th, (uint64_t)(tl->th_stackaddr + PTH_DEFAULT_STACKSIZE + PTH_DEFAULT_GUARDSIZE), IS_64BIT_PROCESS(p));
1439 uth = get_bsdthread_info(tl->th_thread);
1440 uth->uu_threadlist = (void *)tl;
1444 TAILQ_INSERT_TAIL(&wq->wq_thidlelist[tl->th_affinity_tag], tl, th_entry);
1448 KERNEL_DEBUG1(0xefffd014 | DBG_FUNC_START, (int)current_thread(), affinity_tag, wq->wq_nthreads, 0, (int)tl->th_thread);
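
Lines 1347-1448 create a new workqueue thread. The single allocation sized at line 1373 is carved into a guard region, the stack, and the user-level pthread structure; the register setup at lines 2004-2054 recovers the three boundaries. A sketch of the layout arithmetic, with illustrative names that are not helpers from the file (the PTH_* constants come from the file's own headers):

    struct wq_stack_layout {
        mach_vm_offset_t guard_base;   /* PTH_DEFAULT_GUARDSIZE bytes, presumably protected */
        mach_vm_offset_t stack_low;    /* lowest usable stack address (r5/ecx/rdx) */
        mach_vm_offset_t stack_top;    /* initial sp derives from here (r1/esp/rsp) */
        mach_vm_offset_t pthread_area; /* p->p_pthsize bytes of user pthread_t (r3/eax/rdi) */
    };

    static struct wq_stack_layout
    wq_layout(mach_vm_offset_t stackaddr)  /* base returned at line 1384 */
    {
        struct wq_stack_layout l;
        l.guard_base   = stackaddr;
        l.stack_low    = stackaddr + PTH_DEFAULT_GUARDSIZE;
        l.stack_top    = stackaddr + PTH_DEFAULT_GUARDSIZE + PTH_DEFAULT_STACKSIZE;
        l.pthread_area = l.stack_top;  /* pthread struct sits directly above the stack */
        return l;
    }
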
1615 struct threadlist * tl, *tlist;
1634 TAILQ_FOREACH_SAFE(tl, &wq->wq_thrunlist, th_entry, tlist) {
1638 thread_sched_call(tl->th_thread, NULL);
1639 thread_deallocate(tl->th_thread);
1641 TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
1642 kfree(tl, sizeof(struct threadlist));
1645 TAILQ_FOREACH_SAFE(tl, &wq->wq_thidlelist[i], th_entry, tlist) {
1649 thread_sched_call(tl->th_thread, NULL);
1650 thread_deallocate(tl->th_thread);
1652 TAILQ_REMOVE(&wq->wq_thidlelist[i], tl, th_entry);
1653 kfree(tl, sizeof(struct threadlist));
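
Lines 1615-1653 run at workqueue teardown: one pass over the run list and one per affinity tag over the idle lists, each using TAILQ_FOREACH_SAFE so the current entry can be unlinked and freed mid-iteration. A self-contained illustration of that idiom, with generic names and in user space for brevity:

    #include <sys/queue.h>
    #include <stdlib.h>

    struct node {
        TAILQ_ENTRY(node) entries;
    };
    TAILQ_HEAD(nodehead, node);

    void
    drain(struct nodehead *head)
    {
        struct node *n, *nnext;

        /* nnext is captured before the body runs, so freeing n is safe */
        TAILQ_FOREACH_SAFE(n, head, entries, nnext) {
            TAILQ_REMOVE(head, n, entries);
            free(n);
        }
    }
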
1731 struct threadlist *tl = NULL;
1753 tl = uth->uu_threadlist;
1755 if (wq->wq_thactivecount[tl->th_affinity_tag] == 1) {
1827 tl = TAILQ_FIRST(&wq->wq_thidlelist[affinity_tag]);
1828 TAILQ_REMOVE(&wq->wq_thidlelist[affinity_tag], tl, th_entry);
1830 th_to_run = tl->th_thread;
1831 TAILQ_INSERT_TAIL(&wq->wq_thrunlist, tl, th_entry);
1833 if ((tl->th_flags & TH_LIST_SUSPENDED) == TH_LIST_SUSPENDED) {
1834 tl->th_flags &= ~TH_LIST_SUSPENDED;
1836 } else if ((tl->th_flags & TH_LIST_BLOCKED) == TH_LIST_BLOCKED) {
1837 tl->th_flags &= ~TH_LIST_BLOCKED;
1840 tl->th_flags |= TH_LIST_RUNNING;
1871 OSAddAtomic(1, (SInt32 *)&wq->wq_thactivecount[tl->th_affinity_tag]);
1872 OSAddAtomic(1, (SInt32 *)&tl->th_unparked);
1908 wq_runitem(p, item, th_to_run, tl, reuse_thread, wake_thread, (thread == th_to_run));
1925 tl = uth->uu_threadlist;
1926 if (tl == 0)
1929 TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
1930 tl->th_flags &= ~TH_LIST_RUNNING;
1932 tl->th_flags |= TH_LIST_BLOCKED;
1933 TAILQ_INSERT_HEAD(&wq->wq_thidlelist[tl->th_affinity_tag], tl, th_entry);
1935 assert_wait((caddr_t)tl, (THREAD_INTERRUPTIBLE));
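
Lines 1731-1935 cover both directions of the park/unpark handshake. Dispatch (1827-1840) pops an idle thread for the chosen affinity tag, moves it to the run list, and clears TH_LIST_SUSPENDED (never run) or TH_LIST_BLOCKED (parked) before setting TH_LIST_RUNNING; parking (1925-1935) is the inverse, ending in a sleep on the threadlist's own address, which is exactly what wakeup(tl) at line 1980 posts to. Rearranged as a sketch; locking and the elided thread_block() call are assumptions:

    /* Parking side, run by the thread itself (lines 1929-1935): */
    TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
    tl->th_flags &= ~TH_LIST_RUNNING;
    tl->th_flags |= TH_LIST_BLOCKED;
    TAILQ_INSERT_HEAD(&wq->wq_thidlelist[tl->th_affinity_tag], tl, th_entry);
    assert_wait((caddr_t)tl, (THREAD_INTERRUPTIBLE));
    /* ... presumably followed by thread_block() in the elided lines ... */

    /* Dispatch side (lines 1827-1840), later woken via wakeup(tl) at 1980: */
    tl = TAILQ_FIRST(&wq->wq_thidlelist[affinity_tag]);
    TAILQ_REMOVE(&wq->wq_thidlelist[affinity_tag], tl, th_entry);
    th_to_run = tl->th_thread;
    TAILQ_INSERT_TAIL(&wq->wq_thrunlist, tl, th_entry);
    if (tl->th_flags & TH_LIST_SUSPENDED) {
        tl->th_flags &= ~TH_LIST_SUSPENDED;   /* first run of a freshly created thread */
    } else if (tl->th_flags & TH_LIST_BLOCKED) {
        tl->th_flags &= ~TH_LIST_BLOCKED;     /* parked thread; wakeup(tl) resumes it */
    }
    tl->th_flags |= TH_LIST_RUNNING;
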
1960 wq_runitem(proc_t p, user_addr_t item, thread_t th, struct threadlist *tl,
1965 KERNEL_DEBUG1(0xefffd004 | DBG_FUNC_START, (int)current_thread(), (int)item, wake_thread, tl->th_affinity_tag, (int)th);
1967 ret = setup_wqthread(p, th, item, reuse_thread, tl);
1980 wakeup(tl);
1990 setup_wqthread(proc_t p, thread_t th, user_addr_t item, int reuse_thread, struct threadlist *tl)
2004 ts64->r1 = (uint64_t)((tl->th_stackaddr + PTH_DEFAULT_STACKSIZE + PTH_DEFAULT_GUARDSIZE) - C_ARGSAVE_LEN - C_RED_ZONE);
2005 ts64->r3 = (uint64_t)(tl->th_stackaddr + PTH_DEFAULT_STACKSIZE + PTH_DEFAULT_GUARDSIZE);
2006 ts64->r4 = (uint64_t)((unsigned int)tl->th_thport);
2007 ts64->r5 = (uint64_t)(tl->th_stackaddr + PTH_DEFAULT_GUARDSIZE);
2026 ts->eax = (unsigned int)(tl->th_stackaddr + PTH_DEFAULT_STACKSIZE + PTH_DEFAULT_GUARDSIZE);
2027 ts->ebx = (unsigned int)tl->th_thport;
2028 ts->ecx = (unsigned int)(tl->th_stackaddr + PTH_DEFAULT_GUARDSIZE);
2035 ts->esp = (int)((vm_offset_t)((tl->th_stackaddr + PTH_DEFAULT_STACKSIZE + PTH_DEFAULT_GUARDSIZE) - C_32_STK_ALIGN));
2044 ts64->rdi = (uint64_t)(tl->th_stackaddr + PTH_DEFAULT_STACKSIZE + PTH_DEFAULT_GUARDSIZE);
2045 ts64->rsi = (uint64_t)((unsigned int)(tl->th_thport));
2046 ts64->rdx = (uint64_t)(tl->th_stackaddr + PTH_DEFAULT_GUARDSIZE);
2054 ts64->rsp = (uint64_t)((tl->th_stackaddr + PTH_DEFAULT_STACKSIZE + PTH_DEFAULT_GUARDSIZE) - C_64_REDZONE_LEN);
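
Lines 1990-2054 seed the new thread's initial register state per architecture so the user-space start routine receives the same logical arguments everywhere. The mapping, as recoverable from the matches:

    argument               ppc64   i386   x86_64
    pthread self           r3      eax    rdi
    thread port            r4      ebx    rsi
    stack low address      r5      ecx    rdx
    initial stack pointer  r1      esp    rsp

The stack-pointer adjustments differ: ppc64 reserves C_ARGSAVE_LEN plus C_RED_ZONE below the stack top (line 2004), i386 aligns down by C_32_STK_ALIGN (line 2035), and x86_64 leaves C_64_REDZONE_LEN (line 2054).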