Lines Matching refs:wq

127 static boolean_t workqueue_run_nextreq(proc_t p, struct workqueue *wq, thread_t th, boolean_t force_oc,
130 static boolean_t workqueue_run_one(proc_t p, struct workqueue *wq, boolean_t overcommit, int priority);
140 static boolean_t workqueue_addnewthread(struct workqueue *wq, boolean_t oc_thread);
552 workqueue_interval_timer_start(struct workqueue *wq)
556 if (wq->wq_timer_interval == 0)
557 wq->wq_timer_interval = wq_stalled_window_usecs;
559 wq->wq_timer_interval = wq->wq_timer_interval * 2;
561 if (wq->wq_timer_interval > wq_max_timer_interval_usecs)
562 wq->wq_timer_interval = wq_max_timer_interval_usecs;
564 clock_interval_to_deadline(wq->wq_timer_interval, 1000, &deadline);
566 thread_call_enter_delayed(wq->wq_atimer_call, deadline);
568 KERNEL_DEBUG(0xefffd110, wq, wq->wq_reqcount, wq->wq_flags, wq->wq_timer_interval, 0);
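
Note: the matches at lines 552-568 show workqueue_interval_timer_start() re-arming the delayed thread call with an exponentially backed-off interval: the first arm uses wq_stalled_window_usecs, each later arm doubles the previous interval, and the result is clamped to wq_max_timer_interval_usecs. A minimal sketch of just that interval calculation, with a hypothetical helper name:

#include <stdint.h>

/* Sketch of the back-off at lines 556-562: start at the stalled window,
 * double on every re-arm, never exceed the configured maximum.
 * next_timer_interval_usecs() is illustrative, not a kernel function. */
static uint32_t
next_timer_interval_usecs(uint32_t current, uint32_t stalled_window,
    uint32_t max_interval)
{
    uint32_t next;

    if (current == 0)
        next = stalled_window;   /* first arm after the interval was reset */
    else
        next = current * 2;      /* subsequent arms back off exponentially */

    if (next > max_interval)
        next = max_interval;

    return next;
}
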
614 #define WQ_TIMER_NEEDED(wq, start_timer) do { \
615 int oldflags = wq->wq_flags; \
618 if (OSCompareAndSwap(oldflags, oldflags | WQ_ATIMER_RUNNING, (UInt32 *)&wq->wq_flags)) \
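
Note: WQ_TIMER_NEEDED (lines 614-618) sets WQ_ATIMER_RUNNING with a single OSCompareAndSwap on wq_flags, and workqueue_add_timer later clears it with the retry loop at line 675; neither update happens under the workqueue lock. A hedged sketch of both halves of that pattern (the helpers are hypothetical; the kernel open-codes them):

#include <libkern/OSAtomic.h>

/* Set a flag bit once: a failed or unnecessary CAS simply means another
 * thread already owns the timer. Mirrors WQ_TIMER_NEEDED at line 614. */
static Boolean
set_flag_once_sketch(volatile UInt32 *flagsp, UInt32 bit)
{
    UInt32 old = *flagsp;

    if (old & bit)
        return 0;   /* already set; caller must not start another timer */
    return OSCompareAndSwap(old, old | bit, flagsp);
}

/* Clear a flag bit unconditionally: retry until the CAS lands, as the
 * loop at line 675 does for WQ_ATIMER_RUNNING. */
static void
clear_flag_sketch(volatile UInt32 *flagsp, UInt32 bit)
{
    UInt32 old;

    do {
        old = *flagsp;
    } while (!OSCompareAndSwap(old, old & ~bit, flagsp));
}
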
626 workqueue_add_timer(struct workqueue *wq, __unused int param1)
634 KERNEL_DEBUG(0xefffd108 | DBG_FUNC_START, wq, wq->wq_flags, wq->wq_nthreads, wq->wq_thidlecount, 0);
636 p = wq->wq_proc;
658 while (wq->wq_lflags & WQL_ATIMER_BUSY) {
659 wq->wq_lflags |= WQL_ATIMER_WAITING;
661 assert_wait((caddr_t)wq, (THREAD_UNINT));
668 wq->wq_lflags |= WQL_ATIMER_BUSY;
675 while ( !(OSCompareAndSwap(wq->wq_flags, (wq->wq_flags & ~WQ_ATIMER_RUNNING), (UInt32 *)&wq->wq_flags)));
681 if ( !(wq->wq_flags & WQ_EXITING)) {
688 if (wq->wq_reqcount) {
695 if (wq->wq_requests[priority])
703 for (affinity_tag = 0; affinity_tag < wq->wq_reqconc[priority]; affinity_tag++) {
707 if (wq->wq_thidlecount == 0)
716 if (wq->wq_thactive_count[i][affinity_tag]) {
720 if (wq->wq_thscheduled_count[i][affinity_tag]) {
721 if (wq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i][affinity_tag])) {
729 retval = workqueue_addnewthread(wq, FALSE);
733 if (wq->wq_reqcount) {
738 while (wq->wq_thidlecount && !(wq->wq_flags & WQ_EXITING)) {
743 retval = workqueue_run_nextreq(p, wq, THREAD_NULL, FALSE, FALSE, 0, 0);
749 if ( !(wq->wq_flags & WQ_EXITING) && wq->wq_reqcount) {
751 if (wq->wq_thidlecount == 0 && retval == TRUE && add_thread == TRUE)
754 if (wq->wq_thidlecount == 0 || busycount)
755 WQ_TIMER_NEEDED(wq, start_timer);
757 KERNEL_DEBUG(0xefffd108 | DBG_FUNC_NONE, wq, wq->wq_reqcount, wq->wq_thidlecount, busycount, 0);
762 if ( !(wq->wq_flags & WQ_ATIMER_RUNNING))
763 wq->wq_timer_interval = 0;
765 wq->wq_lflags &= ~WQL_ATIMER_BUSY;
767 if ((wq->wq_flags & WQ_EXITING) || (wq->wq_lflags & WQL_ATIMER_WAITING)) {
772 wq->wq_lflags &= ~WQL_ATIMER_WAITING;
773 wakeup(wq);
775 KERNEL_DEBUG(0xefffd108 | DBG_FUNC_END, wq, start_timer, wq->wq_nthreads, wq->wq_thidlecount, 0);
780 workqueue_interval_timer_start(wq);
787 struct workqueue *wq;
792 if ((wq = p->p_wqptr) == NULL || wq->wq_reqcount == 0)
797 if (wq->wq_reqcount) {
803 if (wq->wq_thread_yielded_count++ == 0)
804 wq->wq_thread_yielded_timestamp = mach_absolute_time();
806 if (wq->wq_thread_yielded_count < wq_yielded_threshold) {
810 KERNEL_DEBUG(0xefffd138 | DBG_FUNC_START, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 0, 0);
812 wq->wq_thread_yielded_count = 0;
815 elapsed = curtime - wq->wq_thread_yielded_timestamp;
820 if (wq->wq_thidlecount == 0) {
821 workqueue_addnewthread(wq, TRUE);
830 if (wq->wq_reqcount == 0) {
835 if (wq->wq_thidlecount) {
848 if (wq->wq_requests[priority])
853 wq->wq_reqcount--;
854 wq->wq_requests[priority]--;
856 if (wq->wq_ocrequests[priority]) {
857 wq->wq_ocrequests[priority]--;
862 (void)workqueue_run_nextreq(p, wq, THREAD_NULL, force_oc, overcommit, priority, affinity);
867 KERNEL_DEBUG(0xefffd138 | DBG_FUNC_END, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 1, 0);
872 KERNEL_DEBUG(0xefffd138 | DBG_FUNC_END, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 2, 0);
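
Note: workqueue_thread_yielded() (matches at lines 787-872) throttles itself. The first yield records wq_thread_yielded_timestamp, later yields only increment wq_thread_yielded_count, and nothing happens until the count reaches wq_yielded_threshold; at that point the counter resets and the elapsed time since the first yield decides whether a pending request is run directly. A small sketch of that counting scheme, covering only the threshold/timestamp bookkeeping (helper name is hypothetical):

#include <stdint.h>
#include <mach/mach_time.h>

/* Sketch of the bookkeeping at lines 803-815: returns nonzero only on the
 * yield that crosses the threshold, and reports how long the burst took. */
static int
yield_threshold_crossed_sketch(uint32_t *count, uint64_t *first_ts,
    uint32_t threshold, uint64_t *elapsed_out)
{
    if ((*count)++ == 0)
        *first_ts = mach_absolute_time();   /* stamp the first yield */

    if (*count < threshold)
        return 0;                           /* not enough yields yet */

    *count = 0;                             /* reset for the next burst */
    *elapsed_out = mach_absolute_time() - *first_ts;
    return 1;
}
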
884 struct workqueue *wq;
888 wq = tl->th_workq;
896 old_activecount = OSAddAtomic(-1, &wq->wq_thactive_count[tl->th_priority][tl->th_affinity_tag]);
907 lastblocked_ptr = (UInt64 *)&wq->wq_lastblocked_ts[tl->th_priority][tl->th_affinity_tag];
918 if (wq->wq_reqcount)
919 WQ_TIMER_NEEDED(wq, start_timer);
922 workqueue_interval_timer_start(wq);
924 KERNEL_DEBUG1(0xefffd020 | DBG_FUNC_START, wq, old_activecount, tl->th_priority, tl->th_affinity_tag, thread_tid(thread));
937 OSAddAtomic(1, &wq->wq_thactive_count[tl->th_priority][tl->th_affinity_tag]);
939 KERNEL_DEBUG1(0xefffd020 | DBG_FUNC_END, wq, wq->wq_threads_scheduled, tl->th_priority, tl->th_affinity_tag, thread_tid(thread));
949 struct workqueue *wq;
956 wq = tl->th_workq;
958 TAILQ_REMOVE(&wq->wq_thidlelist, tl, th_entry);
961 wq->wq_nthreads--;
962 wq->wq_thidlecount--;
979 workqueue_unlock(wq->wq_proc);
991 (void)mach_vm_deallocate(wq->wq_map, tl->th_stackaddr, tl->th_allocsize);
993 (void)mach_port_deallocate(get_task_ipcspace(wq->wq_task), tl->th_thport);
995 KERNEL_DEBUG1(0xefffd014 | DBG_FUNC_END, wq, (uintptr_t)thread_tid(current_thread()), wq->wq_nthreads, 0xdead, thread_tid(tl->th_thread));
998 KERNEL_DEBUG1(0xefffd018 | DBG_FUNC_END, wq, (uintptr_t)thread_tid(current_thread()), wq->wq_nthreads, 0xdead, thread_tid(tl->th_thread));
1015 workqueue_addnewthread(struct workqueue *wq, boolean_t oc_thread)
1025 if ((wq->wq_flags & WQ_EXITING) == WQ_EXITING)
1028 if (wq->wq_nthreads >= wq_max_threads || wq->wq_nthreads >= (CONFIG_THREAD_MAX - 20)) {
1029 wq->wq_lflags |= WQL_EXCEEDED_TOTAL_THREAD_LIMIT;
1032 wq->wq_lflags &= ~WQL_EXCEEDED_TOTAL_THREAD_LIMIT;
1034 if (oc_thread == FALSE && wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
1041 wq->wq_lflags |= WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
1044 if (wq->wq_constrained_threads_scheduled < wq_max_constrained_threads)
1045 wq->wq_lflags &= ~WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
1047 wq->wq_nthreads++;
1049 p = wq->wq_proc;
1052 kret = thread_create_workq(wq->wq_task, (thread_continue_t)wq_unsuspend_continue, &th);
1069 kret = mach_vm_map(wq->wq_map, &stackaddr,
1077 kret = mach_vm_allocate(wq->wq_map,
1086 kret = mach_vm_protect(wq->wq_map, stackaddr, PTH_DEFAULT_GUARDSIZE, FALSE, VM_PROT_NONE);
1089 (void) mach_vm_deallocate(wq->wq_map, stackaddr, tl->th_allocsize);
1101 tl->th_thport = ipc_port_copyout_send(sright, get_task_ipcspace(wq->wq_task));
1108 tl->th_workq = wq;
1119 TAILQ_INSERT_TAIL(&wq->wq_thidlelist, tl, th_entry);
1121 wq->wq_thidlecount++;
1123 KERNEL_DEBUG1(0xefffd014 | DBG_FUNC_START, wq, wq->wq_nthreads, 0, thread_tid(current_thread()), thread_tid(tl->th_thread));
1129 wq->wq_nthreads--;
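
Note: workqueue_addnewthread() (matches at lines 1015-1129) only grows the pool after a set of admission checks: nothing is added while WQ_EXITING is set, the total thread count is capped at wq_max_threads (and CONFIG_THREAD_MAX - 20), and a non-overcommit caller is also refused once wq_constrained_threads_scheduled reaches wq_max_constrained_threads, with the WQL_EXCEEDED_* flags recording which limit was hit. A sketch of just those checks, with the limits passed in as plain parameters (the helper is hypothetical):

#include <stdint.h>

/* Sketch of the admission checks at lines 1025-1044; returns nonzero when a
 * new thread may be created. WQL_EXCEEDED_* flag maintenance is omitted. */
static int
may_add_thread_sketch(int exiting, uint32_t nthreads, uint32_t max_threads,
    int oc_thread, uint32_t constrained_scheduled, uint32_t max_constrained)
{
    if (exiting)
        return 0;   /* workqueue is tearing down */
    if (nthreads >= max_threads)
        return 0;   /* total pool cap reached */
    if (!oc_thread && constrained_scheduled >= max_constrained)
        return 0;   /* constrained (non-overcommit) cap reached */
    return 1;
}
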
1138 struct workqueue * wq;
1196 wq = (struct workqueue *)ptr;
1197 wq->wq_flags = WQ_LIST_INITED;
1198 wq->wq_proc = p;
1199 wq->wq_affinity_max = num_cpus;
1200 wq->wq_task = current_task();
1201 wq->wq_map = current_map();
1204 wq->wq_reqconc[i] = wq->wq_affinity_max;
1209 wq->wq_thscheduled_count[i] = (uint16_t *)nptr;
1216 wq->wq_thactive_count[i] = (uint32_t *)nptr;
1229 wq->wq_lastblocked_ts[i] = (uint64_t *)nptr;
1232 TAILQ_INIT(&wq->wq_thrunlist);
1233 TAILQ_INIT(&wq->wq_thidlelist);
1235 wq->wq_atimer_call = thread_call_allocate((thread_call_func_t)workqueue_add_timer, (thread_call_param_t)wq);
1239 p->p_wqptr = (void *)wq;
1257 struct workqueue *wq;
1287 if ((wq = (struct workqueue *)p->p_wqptr) == NULL) {
1294 wq->wq_reqcount += reqcount;
1295 wq->wq_requests[priority] += reqcount;
1297 KERNEL_DEBUG(0xefffd008 | DBG_FUNC_NONE, wq, priority, wq->wq_requests[priority], reqcount, 0);
1299 while (wq->wq_reqcount) {
1300 if (workqueue_run_one(p, wq, overcommit, priority) == FALSE)
1304 KERNEL_DEBUG(0xefffd13c | DBG_FUNC_NONE, wq, priority, wq->wq_requests[priority], reqcount, 0);
1307 if (workqueue_run_one(p, wq, overcommit, priority) == FALSE)
1319 wq->wq_reqcount += reqcount;
1320 wq->wq_requests[priority] += reqcount;
1321 wq->wq_ocrequests[priority] += reqcount;
1323 KERNEL_DEBUG(0xefffd140 | DBG_FUNC_NONE, wq, priority, wq->wq_requests[priority], reqcount, 0);
1343 if ((wq = (struct workqueue *)p->p_wqptr) == NULL || (uth->uu_threadlist == NULL)) {
1349 KERNEL_DEBUG(0xefffd004 | DBG_FUNC_END, wq, 0, 0, 0, 0);
1351 (void)workqueue_run_nextreq(p, wq, th, FALSE, FALSE, 0, -1);
1377 struct workqueue * wq;
1379 wq = p->p_wqptr;
1380 if (wq != NULL) {
1398 while ( !(OSCompareAndSwap(wq->wq_flags, (wq->wq_flags | WQ_EXITING), (UInt32 *)&wq->wq_flags)));
1400 if (wq->wq_flags & WQ_ATIMER_RUNNING) {
1401 if (thread_call_cancel(wq->wq_atimer_call) == TRUE)
1402 wq->wq_flags &= ~WQ_ATIMER_RUNNING;
1404 while ((wq->wq_flags & WQ_ATIMER_RUNNING) || (wq->wq_lflags & WQL_ATIMER_BUSY)) {
1406 assert_wait((caddr_t)wq, (THREAD_UNINT));
1431 struct workqueue * wq;
1436 wq = (struct workqueue *)p->p_wqptr;
1437 if (wq != NULL) {
1449 TAILQ_FOREACH_SAFE(tl, &wq->wq_thrunlist, th_entry, tlist) {
1457 TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
1466 TAILQ_FOREACH_SAFE(tl, &wq->wq_thidlelist, th_entry, tlist) {
1469 thread_call_free(wq->wq_atimer_call);
1471 kfree(wq, wq_size);
1493 workqueue_run_one(proc_t p, struct workqueue *wq, boolean_t overcommit, int priority)
1497 if (wq->wq_thidlecount == 0) {
1499 if (wq->wq_constrained_threads_scheduled < wq->wq_affinity_max)
1500 workqueue_addnewthread(wq, overcommit);
1502 workqueue_addnewthread(wq, overcommit);
1504 if (wq->wq_thidlecount == 0)
1508 ran_one = workqueue_run_nextreq(p, wq, THREAD_NULL, FALSE, overcommit, priority, -1);
1526 workqueue_run_nextreq(proc_t p, struct workqueue *wq, thread_t thread,
1546 KERNEL_DEBUG(0xefffd000 | DBG_FUNC_START, wq, thread, wq->wq_thidlecount, wq->wq_reqcount, 0);
1552 panic("wq thread with no threadlist ");
1574 for (affinity_tag = 0; affinity_tag < wq->wq_reqconc[priority]; affinity_tag++) {
1582 scheduled_count += wq->wq_thscheduled_count[i][affinity_tag];
1583 active_count += wq->wq_thactive_count[i][affinity_tag];
1602 if (wq->wq_reqcount) {
1604 if (wq->wq_requests[priority])
1609 if (wq->wq_ocrequests[priority] && (thread != THREAD_NULL || wq->wq_thidlecount)) {
1615 wq->wq_reqcount--;
1616 wq->wq_requests[priority]--;
1617 wq->wq_ocrequests[priority]--;
1628 if (wq->wq_reqcount == 0 || wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
1653 if (affinity_tag < wq->wq_reqconc[priority]) {
1667 tcount = wq->wq_thactive_count[i][affinity_tag];
1670 if (tcount == 0 && wq->wq_thscheduled_count[i][affinity_tag]) {
1671 if (wq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i][affinity_tag]))
1685 if (wq->wq_reqconc[priority] == 1) {
1717 for (affinity_tag = 0; affinity_tag < wq->wq_reqconc[priority]; affinity_tag++) {
1728 if (wq->wq_thactive_count[i][affinity_tag])
1731 if (wq->wq_thscheduled_count[i][affinity_tag] &&
1732 wq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i][affinity_tag])) {
1743 if (affinity_tag >= wq->wq_reqconc[priority]) {
1758 WQ_TIMER_NEEDED(wq, start_timer);
1760 KERNEL_DEBUG(0xefffd000 | DBG_FUNC_NONE, wq, busycount, start_timer, 0, 0);
1783 if (wq->wq_thidlecount == 0) {
1789 WQ_TIMER_NEEDED(wq, start_timer);
1791 KERNEL_DEBUG(0xefffd118, wq, wq->wq_nthreads, start_timer, 0, 0);
1802 TAILQ_FOREACH(ttl, &wq->wq_thidlelist, th_entry) {
1805 TAILQ_REMOVE(&wq->wq_thidlelist, ttl, th_entry);
1812 tl = TAILQ_FIRST(&wq->wq_thidlelist);
1813 TAILQ_REMOVE(&wq->wq_thidlelist, tl, th_entry);
1815 wq->wq_thidlecount--;
1817 TAILQ_INSERT_TAIL(&wq->wq_thrunlist, tl, th_entry);
1829 wq->wq_threads_scheduled++;
1830 wq->wq_thscheduled_count[priority][affinity_tag]++;
1831 OSAddAtomic(1, &wq->wq_thactive_count[priority][affinity_tag]);
1838 wq->wq_reqcount--;
1839 wq->wq_requests[priority]--;
1842 wq->wq_constrained_threads_scheduled++;
1847 wq->wq_constrained_threads_scheduled--;
1862 OSAddAtomic(-1, &wq->wq_thactive_count[orig_priority][orig_affinity_tag]);
1863 OSAddAtomic(1, &wq->wq_thactive_count[priority][affinity_tag]);
1865 wq->wq_thscheduled_count[orig_priority][orig_affinity_tag]--;
1866 wq->wq_thscheduled_count[priority][affinity_tag]++;
1868 wq->wq_thread_yielded_count = 0;
1883 KERNEL_DEBUG(0xefffd114 | DBG_FUNC_START, wq, orig_affinity_tag, 0, 0, 0);
1887 KERNEL_DEBUG(0xefffd114 | DBG_FUNC_END, wq, affinity_tag, 0, 0, 0);
1902 KERNEL_DEBUG(0xefffd120 | DBG_FUNC_START, wq, orig_priority, tl->th_policy, 0, 0);
1926 KERNEL_DEBUG(0xefffd120 | DBG_FUNC_END, wq, priority, policy, 0, 0);
1938 for (i = 0; i < wq->wq_affinity_max; i++) {
1939 if (wq->wq_thactive_count[n][i]) {
1941 KERNEL_DEBUG(code, lpri, laffinity, wq->wq_thactive_count[lpri][laffinity], first, 0);
1953 KERNEL_DEBUG(0xefffd02c | DBG_FUNC_END, lpri, laffinity, wq->wq_thactive_count[lpri][laffinity], first, 0);
1961 KERNEL_DEBUG(0xefffd000 | DBG_FUNC_END, wq, thread_tid(th_to_run), overcommit, 1, 0);
1974 workqueue_interval_timer_start(wq);
1976 KERNEL_DEBUG(0xefffd000 | DBG_FUNC_END, wq, thread_tid(thread), 0, 2, 0);
1985 TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
1989 TAILQ_INSERT_HEAD(&wq->wq_thidlelist, tl, th_entry);
1993 OSAddAtomic(-1, &wq->wq_thactive_count[tl->th_priority][tl->th_affinity_tag]);
1994 wq->wq_thscheduled_count[tl->th_priority][tl->th_affinity_tag]--;
1995 wq->wq_threads_scheduled--;
1998 wq->wq_constrained_threads_scheduled--;
1999 wq->wq_lflags &= ~WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
2002 if (wq->wq_thidlecount < 100)
2003 us_to_wait = wq_reduce_pool_window_usecs - (wq->wq_thidlecount * (wq_reduce_pool_window_usecs / 100));
2007 wq->wq_thidlecount++;
2008 wq->wq_lflags &= ~WQL_EXCEEDED_TOTAL_THREAD_LIMIT;
2015 workqueue_interval_timer_start(wq);
2017 KERNEL_DEBUG1(0xefffd018 | DBG_FUNC_START, wq, wq->wq_threads_scheduled, wq->wq_thidlecount, us_to_wait, thread_tid(th_to_park));
2018 KERNEL_DEBUG(0xefffd000 | DBG_FUNC_END, wq, thread_tid(thread), 0, 3, 0);
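
Note: when workqueue_run_nextreq() parks a thread (matches around lines 1985-2018), the delay before the idle thread becomes eligible for reaping shrinks as the idle pool grows: with fewer than 100 cached threads, each idle thread shaves one percent off wq_reduce_pool_window_usecs (line 2003); the branch for 100 or more threads does not reference wq and so is not shown in this listing. A worked sketch of the visible formula (hypothetical helper):

#include <stdint.h>

/* Sketch of the park delay at line 2003, valid for thidlecount < 100.
 * Example: window = 5,000,000 us and 20 idle threads -> 4,000,000 us. */
static uint32_t
park_wait_usecs_sketch(uint32_t thidlecount, uint32_t reduce_pool_window_usecs)
{
    return reduce_pool_window_usecs -
        (thidlecount * (reduce_pool_window_usecs / 100));
}
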
2296 struct workqueue * wq;
2302 if ((wq = p->p_wqptr) == NULL) {
2309 for (affinity = 0; affinity < wq->wq_affinity_max; affinity++)
2310 activecount += wq->wq_thactive_count[pri][affinity];
2312 pwqinfo->pwq_nthreads = wq->wq_nthreads;
2314 pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
2317 if (wq->wq_lflags & WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT)
2320 if (wq->wq_lflags & WQL_EXCEEDED_TOTAL_THREAD_LIMIT)
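
Note: the final group (lines 2296-2320, the proc workqueue info path) derives the reported thread counts: pwq_nthreads comes straight from wq_nthreads, the active count is summed over the per-priority/per-affinity slots of wq_thactive_count, and pwq_blockedthreads is whatever is scheduled but not active. The outer loop over priorities is implied rather than visible in this listing; a sketch under that assumption:

#include <stdint.h>

/* Sketch of the accounting behind lines 2309-2314. thactive_count[pri] points
 * at a per-affinity array, matching the wq_thactive_count layout set up at
 * line 1216. Hypothetical helper, not the kernel's fill routine. */
static uint32_t
blocked_threads_sketch(uint32_t *const *thactive_count, int numprios,
    uint32_t affinity_max, uint32_t threads_scheduled)
{
    uint32_t activecount = 0;
    uint32_t affinity;
    int pri;

    for (pri = 0; pri < numprios; pri++)
        for (affinity = 0; affinity < affinity_max; affinity++)
            activecount += thactive_count[pri][affinity];

    return threads_scheduled - activecount;  /* scheduled but off-core == blocked */
}
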