Lines matching defs:wq in /macosx-10.5.8/xnu-1228.15.4/bsd/kern/

130 static int workqueue_additem(struct workqueue *wq, int prio, user_addr_t item);
131 static int workqueue_removeitem(struct workqueue *wq, int prio, user_addr_t item);
136 static int workqueue_addnewthread(struct workqueue *wq);
137 static void workqueue_removethread(struct workqueue *wq);
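Taken together, the declarations above and the field accesses throughout this listing outline the shape of struct workqueue. The sketch below is a reconstruction inferred only from the matches shown here; field order, exact types, and the constant WORKQUEUE_NUMPRIOS are assumptions, not the definition from the xnu-1228.15.4 headers.

/* Approximate layout of struct workqueue, reconstructed from the
 * accesses in this listing; not the authoritative xnu definition. */
struct workqueue {
    uint32_t              wq_flags;            /* WQ_LIST_INITED, WQ_TIMER_RUNNING, WQ_TIMER_WATCH, WQ_ADD_TO_POOL */
    struct proc          *wq_proc;             /* owning process (the workqueue lock lives here) */
    task_t                wq_task;             /* owning Mach task, for port and stack operations */
    vm_map_t              wq_map;              /* map that thread stacks are carved from */
    thread_call_t         wq_timer_call;       /* periodic pool-maintenance timer */
    uint32_t              wq_affinity_max;     /* set to num_cpus at init (line 1493) */
    uint32_t              wq_nextaffinitytag;  /* round-robin cursor over affinity slots */
    uint32_t              wq_nthreads;         /* total pool size */
    uint32_t              wq_itemcount;        /* pending items across all priorities */
    uint32_t              wq_threads_scheduled;
    uint32_t              wq_max_threads_scheduled; /* high-water mark between shrink passes */
    uint32_t              wq_stalled_count;    /* threads that blocked with work pending */
    struct timeval        wq_reduce_ts;        /* last shrink pass */
    struct timeval        wq_lastran_ts;       /* last dispatch */
    struct workitemlist   wq_list[WORKQUEUE_NUMPRIOS];              /* per-priority item lists (line 1498) */
    struct workitem       wq_array[WORKQUEUE_NUMPRIOS * WORKITEM_SIZE]; /* preallocated items (line 1503) */
    uint32_t             *wq_thactivecount;    /* per-tag active counts, trailing storage (line 1507) */
    uint32_t             *wq_thcount;          /* per-tag thread counts (line 1508) */
    struct wq_thidlelist *wq_thidlelist;       /* per-tag idle queues (line 1509) */
    TAILQ_HEAD(, threadlist) wq_thrunlist;     /* running threads (line 1514) */
};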
1112 workqueue_timer(struct workqueue *wq, __unused int param1)
1122 KERNEL_DEBUG(0xefffd108, (int)wq, 0, 0, 0, 0);
1138 if (wq->wq_flags & (WQ_ADD_TO_POOL | WQ_TIMER_WATCH)) {
1140 if (wq->wq_flags & WQ_ADD_TO_POOL)
1143 timersub(&tv, &wq->wq_lastran_ts, &dtv);
1149 for (i = 0; i < wq->wq_affinity_max && wq->wq_nthreads < WORKQUEUE_MAXTHREADS; i++) {
1150 (void)workqueue_addnewthread(wq);
1154 timersub(&tv, &wq->wq_reduce_ts, &dtv);
1168 wq->wq_nthreads > wq->wq_affinity_max &&
1169 wq->wq_max_threads_scheduled <= (wq->wq_nthreads / 2)) {
1172 if ((nthreads_to_remove = (wq->wq_nthreads / 4)) == 0)
1175 for (i = 0; i < nthreads_to_remove && wq->wq_nthreads > wq->wq_affinity_max; i++)
1176 workqueue_removethread(wq);
1178 workqueue_lock_spin(wq->wq_proc);
1181 wq->wq_max_threads_scheduled = 0;
1182 microuptime(&wq->wq_reduce_ts);
1185 wq->wq_flags &= ~(WQ_ADD_TO_POOL | WQ_TIMER_WATCH);
1191 workqueue_run_nextitem(wq->wq_proc, THREAD_NULL);
1192 workqueue_lock_spin(wq->wq_proc);
1194 if ((wq->wq_nthreads > wq->wq_affinity_max) ||
1195 (wq->wq_flags & WQ_TIMER_WATCH)) {
1198 wq->wq_flags &= ~WQ_TIMER_RUNNING;
1200 workqueue_unlock(wq->wq_proc);
1211 workqueue_interval_timer_start(wq->wq_timer_call, wq_timer_interval_msecs);
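The timer body above (lines 1138-1211) implements the pool's grow/shrink policy: grow by one thread per affinity slot when WQ_ADD_TO_POOL is set, shrink a quarter of the pool when it is oversized and at most half of it was ever scheduled, then re-arm the timer if work remains. A minimal standalone sketch of that policy follows; the constants and the state struct are stand-ins, not the xnu definitions.

/* Standalone sketch of the grow/shrink policy in workqueue_timer(). */
#include <stdio.h>

#define WORKQUEUE_MAXTHREADS 64   /* assumed cap; the real value lives in the headers */

struct pool_state {
    unsigned nthreads;              /* wq_nthreads */
    unsigned affinity_max;          /* wq_affinity_max (== ncpus) */
    unsigned max_threads_scheduled; /* high-water mark since last shrink */
};

/* Grow: one new thread per affinity slot, bounded by the global cap
 * (mirrors the loop at line 1149). */
static unsigned pool_grow(struct pool_state *p)
{
    unsigned added = 0, i;
    for (i = 0; i < p->affinity_max && p->nthreads < WORKQUEUE_MAXTHREADS; i++) {
        p->nthreads++;
        added++;
    }
    return added;
}

/* Shrink: if the pool is oversized and at most half of it was ever
 * scheduled, retire a quarter of it, never dropping below affinity_max
 * (mirrors lines 1168-1176). */
static unsigned pool_shrink(struct pool_state *p)
{
    unsigned removed = 0, i, nthreads_to_remove;

    if (p->nthreads <= p->affinity_max ||
        p->max_threads_scheduled > p->nthreads / 2)
        return 0;
    if ((nthreads_to_remove = p->nthreads / 4) == 0)
        nthreads_to_remove = 1;
    for (i = 0; i < nthreads_to_remove && p->nthreads > p->affinity_max; i++) {
        p->nthreads--;
        removed++;
    }
    p->max_threads_scheduled = 0;   /* reset the high-water mark, as at line 1181 */
    return removed;
}

int main(void)
{
    struct pool_state p = { .nthreads = 16, .affinity_max = 4,
                            .max_threads_scheduled = 6 };
    unsigned grew = pool_grow(&p);
    unsigned shrunk = pool_shrink(&p);
    printf("grew by %u, shrank by %u, now %u threads\n", grew, shrunk, p.nthreads);
    return 0;
}

The nthreads/4 step means repeated idle passes shrink the pool geometrically rather than releasing every surplus thread at once.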
1222 struct workqueue *wq;
1226 wq = tl->th_workq;
1234 old_activecount = OSAddAtomic(-1, (SInt32 *)&wq->wq_thactivecount[tl->th_affinity_tag]);
1236 if (old_activecount == 1 && wq->wq_itemcount) {
1241 workqueue_lock_spin(wq->wq_proc);
1248 TAILQ_EMPTY(&wq->wq_thidlelist[tl->th_affinity_tag]))
1249 wq->wq_stalled_count++;
1251 workqueue_run_nextitem(wq->wq_proc, THREAD_NULL);
1257 KERNEL_DEBUG(0xefffd020, (int)thread, wq->wq_threads_scheduled, tl->th_affinity_tag, 0, 0);
1273 OSAddAtomic(1, (SInt32 *)&wq->wq_thactivecount[tl->th_affinity_tag]);
1275 KERNEL_DEBUG(0xefffd024, (int)thread, wq->wq_threads_scheduled, tl->th_affinity_tag, 0, 0);
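Lines 1234-1275 show the scheduler callback maintaining per-affinity-tag active counts with OSAddAtomic: a thread blocking decrements its tag's count, and if it was the last active thread there while items are pending, the dispatcher is kicked (with wq_stalled_count bumped when no idle thread can cover, line 1249). A userland sketch of the same accounting, using C11 atomics in place of the kernel's OSAddAtomic and a print in place of the dispatcher wakeup:

/* Sketch of the blocked/unblocked accounting at lines 1234 and 1273. */
#include <stdatomic.h>
#include <stdio.h>

#define NTAGS 4

static atomic_int thactivecount[NTAGS]; /* wq_thactivecount analogue */
static atomic_int itemcount;            /* wq_itemcount analogue */

static void thread_blocked(int tag)
{
    /* atomic_fetch_sub returns the value *before* the subtraction,
     * matching the OSAddAtomic(-1, ...) return semantics. */
    int old_activecount = atomic_fetch_sub(&thactivecount[tag], 1);

    if (old_activecount == 1 && atomic_load(&itemcount) != 0) {
        /* Last runnable thread on this tag just stalled with work
         * pending: wake the dispatcher (workqueue_run_nextitem in the
         * real code, line 1251). */
        printf("tag %d stalled with work pending; kicking dispatcher\n", tag);
    }
}

static void thread_unblocked(int tag)
{
    atomic_fetch_add(&thactivecount[tag], 1); /* line 1273 */
}

int main(void)
{
    atomic_store(&thactivecount[0], 1);
    atomic_store(&itemcount, 3);
    thread_blocked(0);     /* triggers the dispatcher kick */
    thread_unblocked(0);
    return 0;
}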
1281 workqueue_removethread(struct workqueue *wq)
1288 workqueue_lock_spin(wq->wq_proc);
1290 for (i = 0; i < wq->wq_affinity_max; i++) {
1292 affinity_tag = wq->wq_nextaffinitytag;
1295 affinity_tag = wq->wq_affinity_max - 1;
1298 wq->wq_nextaffinitytag = affinity_tag;
1304 if (!TAILQ_EMPTY(&wq->wq_thidlelist[affinity_tag]) && wq->wq_thcount[affinity_tag] > 1) {
1305 tl = TAILQ_FIRST(&wq->wq_thidlelist[affinity_tag]);
1306 TAILQ_REMOVE(&wq->wq_thidlelist[affinity_tag], tl, th_entry);
1308 wq->wq_nthreads--;
1309 wq->wq_thcount[affinity_tag]--;
1314 workqueue_unlock(wq->wq_proc);
1328 (void)mach_vm_deallocate(wq->wq_map, tl->th_stackaddr, tl->th_allocsize);
1329 (void)mach_port_deallocate(get_task_ipcspace(wq->wq_task), (mach_port_name_t)tl->th_thport);
1333 KERNEL_DEBUG(0xefffd030, (int)tl->th_thread, wq->wq_nthreads, tl->th_flags & TH_LIST_BLOCKED, 0, 0);
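workqueue_removethread() (lines 1281-1333) walks the affinity slots round-robin and pops one parked thread from an idle TAILQ, but only where a slot keeps more than one thread. A standalone sketch of that pop using the same <sys/queue.h> macros; the threadlist type and counters here are stand-ins:

/* Sketch of the idle-list pop at lines 1290-1309. */
#include <sys/queue.h>
#include <stdio.h>

struct threadlist {
    int th_id;
    TAILQ_ENTRY(threadlist) th_entry;
};
TAILQ_HEAD(thidlehead, threadlist);

/* Pop one idle thread from this tag's list, but only while the tag
 * retains more than one thread (the wq_thcount[tag] > 1 check at line
 * 1304 keeps one thread per affinity slot alive). */
static struct threadlist *
idle_pop(struct thidlehead *list, unsigned *thcount, unsigned *nthreads)
{
    struct threadlist *tl = NULL;

    if (!TAILQ_EMPTY(list) && *thcount > 1) {
        tl = TAILQ_FIRST(list);
        TAILQ_REMOVE(list, tl, th_entry);
        (*nthreads)--;   /* line 1308 */
        (*thcount)--;    /* line 1309 */
    }
    return tl;
}

int main(void)
{
    struct thidlehead idle = TAILQ_HEAD_INITIALIZER(idle);
    unsigned thcount = 2, nthreads = 8;
    struct threadlist a = { .th_id = 1 }, b = { .th_id = 2 };

    TAILQ_INSERT_TAIL(&idle, &a, th_entry);
    TAILQ_INSERT_TAIL(&idle, &b, th_entry);

    struct threadlist *tl = idle_pop(&idle, &thcount, &nthreads);
    printf("removed thread %d; %u threads left on tag\n",
           tl ? tl->th_id : -1, thcount);
    return 0;
}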
1345 workqueue_addnewthread(struct workqueue *wq)
1356 p = wq->wq_proc;
1358 kret = thread_create(wq->wq_task, &th);
1375 kret = mach_vm_map(wq->wq_map, &stackaddr,
1383 kret = mach_vm_allocate(wq->wq_map,
1392 kret = mach_vm_protect(wq->wq_map, stackaddr, PTH_DEFAULT_GUARDSIZE, FALSE, VM_PROT_NONE);
1395 (void) mach_vm_deallocate(wq->wq_map, stackaddr, tl->th_allocsize);
1407 tl->th_thport = (void *)ipc_port_copyout_send(sright, get_task_ipcspace(wq->wq_task));
1413 affinity_tag = wq->wq_nextaffinitytag;
1414 wq->wq_nextaffinitytag = (affinity_tag + 1) % wq->wq_affinity_max;
1421 tl->th_workq = wq;
1444 TAILQ_INSERT_TAIL(&wq->wq_thidlelist[tl->th_affinity_tag], tl, th_entry);
1445 wq->wq_nthreads++;
1446 wq->wq_thcount[affinity_tag]++;
1448 KERNEL_DEBUG1(0xefffd014 | DBG_FUNC_START, (int)current_thread(), affinity_tag, wq->wq_nthreads, 0, (int)tl->th_thread);
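In workqueue_addnewthread() (lines 1345-1448), each new thread gets a stack mapped into the process, with the lowest PTH_DEFAULT_GUARDSIZE bytes protected to VM_PROT_NONE so an overflow faults instead of corrupting adjacent memory (lines 1375-1395). A userland analogue of that layout, with mmap/mprotect standing in for the Mach VM calls:

/* Userland analogue of the guarded stack setup at lines 1375-1395. */
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    size_t guardsize = (size_t)getpagesize(); /* PTH_DEFAULT_GUARDSIZE analogue */
    size_t stacksize = 512 * 1024;
    size_t allocsize = stacksize + guardsize; /* tl->th_allocsize analogue */

    char *base = mmap(NULL, allocsize, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
    if (base == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    /* The lowest page becomes inaccessible, mirroring the VM_PROT_NONE
     * protection applied at line 1392. */
    if (mprotect(base, guardsize, PROT_NONE) != 0) {
        perror("mprotect");
        munmap(base, allocsize);              /* cleanup path, as at line 1395 */
        return 1;
    }
    printf("stack %p..%p, guard page at %p\n",
           (void *)(base + guardsize), (void *)(base + allocsize), (void *)base);
    munmap(base, allocsize);
    return 0;
}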
1468 struct workqueue * wq;
1490 wq = (struct workqueue *)ptr;
1491 wq->wq_flags = WQ_LIST_INITED;
1492 wq->wq_proc = p;
1493 wq->wq_affinity_max = num_cpus;
1494 wq->wq_task = current_task();
1495 wq->wq_map = current_map();
1498 wl = (struct workitemlist *)&wq->wq_list[i];
1503 witem = &wq->wq_array[(i*WORKITEM_SIZE) + j];
1507 wq->wq_thactivecount = (uint32_t *)((char *)ptr + sizeof(struct workqueue));
1508 wq->wq_thcount = (uint32_t *)&wq->wq_thactivecount[wq->wq_affinity_max];
1509 wq->wq_thidlelist = (struct wq_thidlelist *)&wq->wq_thcount[wq->wq_affinity_max];
1511 for (i = 0; i < wq->wq_affinity_max; i++)
1512 TAILQ_INIT(&wq->wq_thidlelist[i]);
1514 TAILQ_INIT(&wq->wq_thrunlist);
1516 p->p_wqptr = (void *)wq;
1521 wq->wq_timer_call = thread_call_allocate((thread_call_func_t)workqueue_timer, (thread_call_param_t)wq);
1523 for (i = 0; i < wq->wq_affinity_max; i++) {
1524 (void)workqueue_addnewthread(wq);
1527 if (wq->wq_nthreads == 0)
1531 microuptime(&wq->wq_reduce_ts);
1532 microuptime(&wq->wq_lastran_ts);
1533 wq->wq_max_threads_scheduled = 0;
1534 wq->wq_stalled_count = 0;
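The init path (lines 1490-1534) makes one allocation and carves the per-affinity-tag arrays out of the storage behind the header: wq_thactivecount sits immediately after struct workqueue, wq_thcount after that, and the idle-queue heads last (lines 1507-1509). A standalone sketch of the same carving with simplified stand-in types:

/* Sketch of the single-allocation layout built at lines 1507-1512. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct threadlist { TAILQ_ENTRY(threadlist) th_entry; };
TAILQ_HEAD(wq_thidlelist, threadlist);

struct workqueue_hdr {
    uint32_t  affinity_max;
    uint32_t *thactivecount;            /* carved out below */
    uint32_t *thcount;
    struct wq_thidlelist *thidlelist;
};

static struct workqueue_hdr *wq_alloc(uint32_t affinity_max)
{
    size_t size = sizeof(struct workqueue_hdr)
                + affinity_max * sizeof(uint32_t)              /* thactivecount */
                + affinity_max * sizeof(uint32_t)              /* thcount */
                + affinity_max * sizeof(struct wq_thidlelist); /* idle queues */
    char *ptr = calloc(1, size);
    if (ptr == NULL)
        return NULL;

    struct workqueue_hdr *wq = (struct workqueue_hdr *)ptr;
    wq->affinity_max  = affinity_max;
    wq->thactivecount = (uint32_t *)(ptr + sizeof(struct workqueue_hdr));
    wq->thcount       = &wq->thactivecount[affinity_max];      /* line 1508 */
    wq->thidlelist    = (struct wq_thidlelist *)&wq->thcount[affinity_max]; /* line 1509 */

    for (uint32_t i = 0; i < affinity_max; i++)
        TAILQ_INIT(&wq->thidlelist[i]);                        /* line 1512 */
    return wq;
}

int main(void)
{
    struct workqueue_hdr *wq = wq_alloc(4);
    if (wq == NULL)
        return 1;
    printf("4 tags, active[0]=%u, idle[3] empty=%d\n",
           wq->thactivecount[0], TAILQ_EMPTY(&wq->thidlelist[3]));
    free(wq);
    return 0;
}

As in the original, the pointer arithmetic assumes the two uint32_t arrays leave the queue heads adequately aligned.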
1549 struct workqueue *wq;
1564 if ((wq = (struct workqueue *)p->p_wqptr) == NULL) {
1568 error = workqueue_additem(wq, prio, item);
1579 if ((wq = (struct workqueue *)p->p_wqptr) == NULL) {
1583 error = workqueue_removeitem(wq, prio, item);
1594 if ((wq = (struct workqueue *)p->p_wqptr) == NULL) {
1614 struct workqueue * wq;
1622 wq = (struct workqueue *)p->p_wqptr;
1627 if (wq == NULL)
1630 if (wq->wq_flags & WQ_TIMER_RUNNING)
1631 thread_call_cancel(wq->wq_timer_call);
1632 thread_call_free(wq->wq_timer_call);
1634 TAILQ_FOREACH_SAFE(tl, &wq->wq_thrunlist, th_entry, tlist) {
1641 TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
1644 for (i = 0; i < wq->wq_affinity_max; i++) {
1645 TAILQ_FOREACH_SAFE(tl, &wq->wq_thidlelist[i], th_entry, tlist) {
1652 TAILQ_REMOVE(&wq->wq_thidlelist[i], tl, th_entry);
1656 kfree(wq, p->p_wqsize);
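Teardown (lines 1630-1656) cancels and frees the timer call, then drains the run list and every idle list before releasing the workqueue in one kfree. The _SAFE iteration variant matters because entries are unlinked mid-walk; a minimal demonstration (TAILQ_FOREACH_SAFE is the BSD <sys/queue.h> macro the source uses):

/* Sketch of the teardown loops at lines 1634-1652. */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct threadlist {
    int th_id;
    TAILQ_ENTRY(threadlist) th_entry;
};
TAILQ_HEAD(thhead, threadlist);

int main(void)
{
    struct thhead runlist = TAILQ_HEAD_INITIALIZER(runlist);
    struct threadlist *tl, *tlist;

    for (int i = 0; i < 3; i++) {
        tl = malloc(sizeof(*tl));
        tl->th_id = i;
        TAILQ_INSERT_TAIL(&runlist, tl, th_entry);
    }
    /* tlist holds the successor before tl is unlinked, so removal is
     * safe; plain TAILQ_FOREACH would walk freed memory here. */
    TAILQ_FOREACH_SAFE(tl, &runlist, th_entry, tlist) {
        TAILQ_REMOVE(&runlist, tl, th_entry);
        printf("retired thread %d\n", tl->th_id);
        free(tl);
    }
    return 0;
}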
1661 workqueue_additem(struct workqueue *wq, int prio, user_addr_t item)
1666 wl = (struct workitemlist *)&wq->wq_list[prio];
1677 if (wq->wq_itemcount == 0) {
1678 microuptime(&wq->wq_lastran_ts);
1679 wq->wq_stalled_count = 0;
1681 wq->wq_itemcount++;
1687 workqueue_removeitem(struct workqueue *wq, int prio, user_addr_t item)
1693 wl = (struct workitemlist *)&wq->wq_list[prio];
1698 wq->wq_itemcount--;
1707 if (wq->wq_itemcount == 0)
1708 wq->wq_flags &= ~(WQ_ADD_TO_POOL | WQ_TIMER_WATCH);
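workqueue_additem()/workqueue_removeitem() (lines 1661-1708) never allocate: items were preallocated into wq_array at init and parked on a per-priority free list, so queueing moves an entry from the free list to the pending list and dequeueing moves it back. A standalone sketch of that pool; the list and constant names are stand-ins inferred from this listing:

/* Sketch of the per-priority item pools behind additem/removeitem. */
#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>

#define NUMPRIOS       3
#define ITEMS_PER_PRIO 4

struct workitem {
    uint64_t wi_item;                 /* user_addr_t analogue */
    TAILQ_ENTRY(workitem) wi_entry;
};

struct workitemlist {
    TAILQ_HEAD(, workitem) wl_freelist;
    TAILQ_HEAD(, workitem) wl_itemlist;  /* pending */
};

static struct workitemlist wq_list[NUMPRIOS];
static struct workitem wq_array[NUMPRIOS * ITEMS_PER_PRIO];
static unsigned wq_itemcount;

static void wq_init(void)
{
    for (int i = 0; i < NUMPRIOS; i++) {
        TAILQ_INIT(&wq_list[i].wl_freelist);
        TAILQ_INIT(&wq_list[i].wl_itemlist);
        for (int j = 0; j < ITEMS_PER_PRIO; j++)   /* cf. line 1503 */
            TAILQ_INSERT_TAIL(&wq_list[i].wl_freelist,
                              &wq_array[i * ITEMS_PER_PRIO + j], wi_entry);
    }
}

static int wq_additem(int prio, uint64_t item)
{
    struct workitemlist *wl = &wq_list[prio];
    struct workitem *witem = TAILQ_FIRST(&wl->wl_freelist);

    if (witem == NULL)
        return -1;                    /* pool exhausted */
    TAILQ_REMOVE(&wl->wl_freelist, witem, wi_entry);
    witem->wi_item = item;
    TAILQ_INSERT_TAIL(&wl->wl_itemlist, witem, wi_entry);
    wq_itemcount++;                   /* cf. line 1681 */
    return 0;
}

static int wq_removeitem(int prio, uint64_t item)
{
    struct workitemlist *wl = &wq_list[prio];
    struct workitem *witem;

    TAILQ_FOREACH(witem, &wl->wl_itemlist, wi_entry) {
        if (witem->wi_item == item) {
            TAILQ_REMOVE(&wl->wl_itemlist, witem, wi_entry);
            TAILQ_INSERT_TAIL(&wl->wl_freelist, witem, wi_entry);
            wq_itemcount--;           /* cf. line 1698 */
            return 0;
        }
    }
    return -1;                        /* not queued */
}

int main(void)
{
    wq_init();
    wq_additem(1, 0x1000);
    wq_additem(1, 0x2000);
    wq_removeitem(1, 0x1000);
    printf("pending items: %u\n", wq_itemcount);
    return 0;
}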
1721 struct workqueue *wq;
1737 wq = (struct workqueue *)p->p_wqptr;
1739 KERNEL_DEBUG(0xefffd000 | DBG_FUNC_START, (int)thread, wq->wq_threads_scheduled, wq->wq_stalled_count, 0, 0);
1741 if (wq->wq_itemcount == 0) {
1755 if (wq->wq_thactivecount[tl->th_affinity_tag] == 1) {
1764 for (affinity_tag = 0; affinity_tag < wq->wq_affinity_max; affinity_tag++) {
1769 if (wq->wq_thactivecount[affinity_tag] == 0) {
1770 if (!TAILQ_EMPTY(&wq->wq_thidlelist[affinity_tag]))
1779 if (affinity_tag >= wq->wq_affinity_max) {
1787 if ( !(wq->wq_flags & WQ_TIMER_RUNNING) ) {
1788 wq->wq_flags |= WQ_TIMER_RUNNING;
1791 wq->wq_flags |= WQ_TIMER_WATCH;
1802 if (affinity_tag >= wq->wq_affinity_max) {
1810 if ( !(wq->wq_flags & WQ_TIMER_RUNNING) ) {
1811 wq->wq_flags |= WQ_TIMER_RUNNING;
1814 wq->wq_flags |= WQ_TIMER_WATCH;
1827 tl = TAILQ_FIRST(&wq->wq_thidlelist[affinity_tag]);
1828 TAILQ_REMOVE(&wq->wq_thidlelist[affinity_tag], tl, th_entry);
1831 TAILQ_INSERT_TAIL(&wq->wq_thrunlist, tl, th_entry);
1842 wq->wq_threads_scheduled++;
1844 if (wq->wq_threads_scheduled > wq->wq_max_threads_scheduled)
1845 wq->wq_max_threads_scheduled = wq->wq_threads_scheduled;
1849 wl = (struct workitemlist *)&wq->wq_list[i];
1855 wq->wq_itemcount--;
1871 OSAddAtomic(1, (SInt32 *)&wq->wq_thactivecount[tl->th_affinity_tag]);
1874 if (wq->wq_itemcount == 0)
1875 wq->wq_flags &= ~WQ_TIMER_WATCH;
1886 if (wq->wq_stalled_count && !(wq->wq_flags & WQ_ADD_TO_POOL)) {
1887 timersub(&tv, &wq->wq_lastran_ts, &lat_tv);
1890 wq->wq_flags |= WQ_ADD_TO_POOL;
1892 KERNEL_DEBUG(0xefffd10c, wq->wq_stalled_count, lat_tv.tv_sec, lat_tv.tv_usec, wq->wq_flags, 0);
1894 wq->wq_lastran_ts = tv;
1896 wq->wq_stalled_count = 0;
1899 KERNEL_DEBUG(0xefffd02c, wq->wq_thactivecount[0], wq->wq_thactivecount[1],
1900 wq->wq_thactivecount[2], wq->wq_thactivecount[3], 0);
1902 KERNEL_DEBUG(0xefffd02c, wq->wq_thactivecount[4], wq->wq_thactivecount[5],
1903 wq->wq_thactivecount[6], wq->wq_thactivecount[7], 0);
1912 KERNEL_DEBUG(0xefffd000 | DBG_FUNC_END, (int)thread, (int)item, wq->wq_flags, 1, 0);
1919 wq->wq_threads_scheduled--;
1927 panic("wq thread with no threadlist ");
1929 TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
1933 TAILQ_INSERT_HEAD(&wq->wq_thidlelist[tl->th_affinity_tag], tl, th_entry);
1940 workqueue_interval_timer_start(wq->wq_timer_call, wq_timer_interval_msecs);
1942 KERNEL_DEBUG1(0xefffd018 | DBG_FUNC_START, (int)current_thread(), wq->wq_threads_scheduled, 0, 0, (int)th_to_park);
1952 workqueue_interval_timer_start(wq->wq_timer_call, wq_timer_interval_msecs);
1954 KERNEL_DEBUG(0xefffd000 | DBG_FUNC_END, (int)thread, 0, wq->wq_flags, 2, 0);
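The dispatcher, workqueue_run_nextitem() (lines 1721-1954), picks where to run next by scanning affinity tags for one with no active thread and a parked idle thread (lines 1764-1779); if every tag is busy it arms the timer and sets WQ_TIMER_WATCH rather than overcommitting a CPU. A reduced sketch of that selection:

/* Sketch of the affinity-tag scan at lines 1764-1779. */
#include <stdio.h>

#define NTAGS 4

static int pick_affinity_tag(const unsigned thactivecount[NTAGS],
                             const int idle_available[NTAGS])
{
    for (int tag = 0; tag < NTAGS; tag++) {
        if (thactivecount[tag] == 0 && idle_available[tag])
            return tag;   /* idle CPU slot with a parked thread ready */
    }
    return -1;            /* all tags busy: caller sets WQ_TIMER_WATCH */
}

int main(void)
{
    unsigned active[NTAGS] = { 1, 0, 2, 0 };
    int idle[NTAGS]        = { 1, 0, 1, 1 };
    printf("chose tag %d\n", pick_affinity_tag(active, idle)); /* prints tag 3 */
    return 0;
}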