Lines Matching refs:priority

130 static boolean_t workqueue_run_one(proc_t p, struct workqueue *wq, boolean_t overcommit, int priority);
132 static void wq_runreq(proc_t p, boolean_t overcommit, uint32_t priority, thread_t th, struct threadlist *tl,
135 static int setup_wqthread(proc_t p, thread_t th, boolean_t overcommit, uint32_t priority, int reuse_thread, struct threadlist *tl);
689 uint32_t priority;
694 for (priority = 0; priority < WORKQUEUE_NUMPRIOS; priority++) {
695 if (wq->wq_requests[priority])
698 assert(priority < WORKQUEUE_NUMPRIOS);
703 for (affinity_tag = 0; affinity_tag < wq->wq_reqconc[priority]; affinity_tag++) {
712 * i.e. no active threads at this priority level or higher
713 * and has not been active recently at this priority level or higher
715 for (i = 0; i <= priority; i++) {
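
Lines 689-715 are the scan that picks work to run: walk the priority levels from 0 (the highest) until one with pending wq_requests is found, then, for each affinity slot up to wq_reqconc[priority], add up the active counts at that level and every higher level to decide whether the slot is genuinely idle. The same "sum the counts for i <= priority" test reappears at lines 1581 and 1725 inside workqueue_run_nextreq. Below is a small user-space model of that test; the struct, constants, and function names are simplified stand-ins for illustration, not the kernel's definitions, and the model ignores the "has not been active recently" half of the check mentioned at line 713.

#include <stdbool.h>
#include <stdint.h>

#define NUMPRIOS    4    /* stand-in for WORKQUEUE_NUMPRIOS */
#define MAXAFFINITY 8    /* arbitrary cap for the model */

struct model_wq {
    uint32_t requests[NUMPRIOS];               /* stand-in for wq_requests[] */
    uint32_t reqconc[NUMPRIOS];                /* stand-in for wq_reqconc[] */
    uint32_t active[NUMPRIOS][MAXAFFINITY];    /* stand-in for wq_thactive_count[][] */
};

/*
 * Pick the highest priority with pending requests, then look for an
 * affinity slot with no activity at that priority level or higher
 * (lines 712-715).  Returns true and fills *prio_out / *tag_out if an
 * idle slot exists.
 */
static bool
model_find_slot(const struct model_wq *wq, uint32_t *prio_out, uint32_t *tag_out)
{
    uint32_t priority, tag, i;

    for (priority = 0; priority < NUMPRIOS; priority++) {   /* line 694 */
        if (wq->requests[priority])                         /* line 695 */
            break;
    }
    if (priority == NUMPRIOS)
        return false;                     /* nothing queued */

    for (tag = 0; tag < wq->reqconc[priority]; tag++) {     /* line 703 */
        uint32_t busy = 0;

        for (i = 0; i <= priority; i++)                     /* line 715 */
            busy += wq->active[i][tag];

        if (busy == 0) {
            *prio_out = priority;
            *tag_out = tag;
            return true;
        }
    }
    return false;                         /* every slot is busy */
}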
836 uint32_t priority;
847 for (priority = 0; priority < WORKQUEUE_NUMPRIOS; priority++) {
848 if (wq->wq_requests[priority])
851 assert(priority < WORKQUEUE_NUMPRIOS);
854 wq->wq_requests[priority]--;
856 if (wq->wq_ocrequests[priority]) {
857 wq->wq_ocrequests[priority]--;
862 (void)workqueue_run_nextreq(p, wq, THREAD_NULL, force_oc, overcommit, priority, affinity);
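
Lines 836-862 show a request actually being consumed: the highest non-empty priority is chosen again, wq_requests[priority] is decremented, and if that level also has outstanding overcommit requests then wq_ocrequests[priority] is decremented too before workqueue_run_nextreq() is called with the overcommit disposition. A hedged model of that bookkeeping follows; the types and names are simplified stand-ins rather than the kernel's.

#include <stdbool.h>
#include <stdint.h>

#define NUMPRIOS 4    /* stand-in for WORKQUEUE_NUMPRIOS */

struct model_wq_oc {
    uint32_t requests[NUMPRIOS];     /* stand-in for wq_requests[] */
    uint32_t ocrequests[NUMPRIOS];   /* stand-in for wq_ocrequests[] */
};

/*
 * Consume one queued request at the highest non-empty priority.
 * Returns true if a request was taken; *overcommit_out reports whether
 * it was an overcommit request, which (per the comment at lines
 * 1612-1613) is served ahead of normal requests at the same level.
 */
static bool
model_take_request(struct model_wq_oc *wq, uint32_t *prio_out, bool *overcommit_out)
{
    uint32_t priority;

    for (priority = 0; priority < NUMPRIOS; priority++) {   /* line 847 */
        if (wq->requests[priority])                         /* line 848 */
            break;
    }
    if (priority == NUMPRIOS)
        return false;                    /* nothing pending */

    wq->requests[priority]--;            /* line 854 */

    *overcommit_out = false;
    if (wq->ocrequests[priority]) {      /* line 856 */
        wq->ocrequests[priority]--;      /* line 857 */
        *overcommit_out = true;
    }
    *prio_out = priority;

    /* the kernel then hands the request to workqueue_run_nextreq()
     * with this priority and disposition (line 862) */
    return true;
}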
1274 int priority = uap->prio;
1277 if (priority & WORKQUEUE_OVERCOMMIT) {
1278 priority &= ~WORKQUEUE_OVERCOMMIT;
1281 if ((reqcount <= 0) || (priority < 0) || (priority >= WORKQUEUE_NUMPRIOS)) {
1295 wq->wq_requests[priority] += reqcount;
1297 KERNEL_DEBUG(0xefffd008 | DBG_FUNC_NONE, wq, priority, wq->wq_requests[priority], reqcount, 0);
1300 if (workqueue_run_one(p, wq, overcommit, priority) == FALSE)
1304 KERNEL_DEBUG(0xefffd13c | DBG_FUNC_NONE, wq, priority, wq->wq_requests[priority], reqcount, 0);
1307 if (workqueue_run_one(p, wq, overcommit, priority) == FALSE)
1320 wq->wq_requests[priority] += reqcount;
1321 wq->wq_ocrequests[priority] += reqcount;
1323 KERNEL_DEBUG(0xefffd140 | DBG_FUNC_NONE, wq, priority, wq->wq_requests[priority], reqcount, 0);
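
Lines 1274-1323 are the submission side in workq_kernreturn(): user space passes a priority that may have the WORKQUEUE_OVERCOMMIT bit OR'd in, the kernel strips that bit, range-checks the remaining priority against WORKQUEUE_NUMPRIOS, and credits wq_requests[priority] (and, for overcommit submissions, wq_ocrequests[priority]) with reqcount before trying to start threads via workqueue_run_one(). The sketch below collapses the two kernel paths (lines 1295-1307 and 1320-1323) into one admission step; OVERCOMMIT_FLAG and the other names are hypothetical stand-ins.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define NUMPRIOS        4          /* stand-in for WORKQUEUE_NUMPRIOS */
#define OVERCOMMIT_FLAG 0x10000    /* hypothetical stand-in for WORKQUEUE_OVERCOMMIT */

struct model_wq_req {
    uint32_t requests[NUMPRIOS];     /* stand-in for wq_requests[] */
    uint32_t ocrequests[NUMPRIOS];   /* stand-in for wq_ocrequests[] */
};

/* Admit `reqcount` work requests at the user-supplied encoded priority. */
static int
model_addthreads(struct model_wq_req *wq, int prio, int reqcount)
{
    bool overcommit = false;

    if (prio & OVERCOMMIT_FLAG) {                         /* line 1277 */
        prio &= ~OVERCOMMIT_FLAG;                         /* line 1278 */
        overcommit = true;
    }
    if (reqcount <= 0 || prio < 0 || prio >= NUMPRIOS)    /* line 1281 */
        return EINVAL;

    wq->requests[prio] += reqcount;                       /* lines 1295, 1320 */
    if (overcommit)
        wq->ocrequests[prio] += reqcount;                 /* line 1321 */

    /* the kernel then tries to start threads right away via
     * workqueue_run_one() (lines 1300, 1307); whatever cannot run
     * immediately stays counted for the add-thread timer */
    return 0;
}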
1493 workqueue_run_one(proc_t p, struct workqueue *wq, boolean_t overcommit, int priority)
1508 ran_one = workqueue_run_nextreq(p, wq, THREAD_NULL, FALSE, overcommit, priority, -1);
1533 uint32_t priority, orig_priority;
1558 * since we have to independently update the priority
1571 priority = oc_prio;
1574 for (affinity_tag = 0; affinity_tag < wq->wq_reqconc[priority]; affinity_tag++) {
1581 for (i = 0; i <= priority; i++) {
1603 for (priority = 0; priority < WORKQUEUE_NUMPRIOS; priority++) {
1604 if (wq->wq_requests[priority])
1607 assert(priority < WORKQUEUE_NUMPRIOS);
1609 if (wq->wq_ocrequests[priority] && (thread != THREAD_NULL || wq->wq_thidlecount)) {
1612 * they have priority over normal requests
1613 * within a given priority level
1616 wq->wq_requests[priority]--;
1617 wq->wq_ocrequests[priority]--;
1619 oc_prio = priority;
1650 * specified concurrency for the priority level
1653 if (affinity_tag < wq->wq_reqconc[priority]) {
1662 * add up the active counts of all the priority levels
1665 for (i = 0; i <= priority; i++) {
1678 * affinity group at this priority level and higher,
1685 if (wq->wq_reqconc[priority] == 1) {
1688 * priority level or higher and since we only have
1710 * or the concurrency level has been cut back for this priority...
1717 for (affinity_tag = 0; affinity_tag < wq->wq_reqconc[priority]; affinity_tag++) {
1722 * i.e. no active threads at this priority level or higher
1725 for (i = 0; i <= priority; i++) {
1743 if (affinity_tag >= wq->wq_reqconc[priority]) {
1830 wq->wq_thscheduled_count[priority][affinity_tag]++;
1831 OSAddAtomic(1, &wq->wq_thactive_count[priority][affinity_tag]);
1839 wq->wq_requests[priority]--;
1854 tl->th_priority = priority;
1857 if (adjust_counters == TRUE && (orig_priority != priority || orig_affinity_tag != affinity_tag)) {
1860 * thread's new disposition w/r to affinity and priority
1863 OSAddAtomic(1, &wq->wq_thactive_count[priority][affinity_tag]);
1866 wq->wq_thscheduled_count[priority][affinity_tag]++;
1889 if (orig_priority != priority) {
1897 retval = proc_setthread_saved_importance(th_to_run, workqueue_importance[priority]);
1900 policy = workqueue_policy[priority];
1904 if ((orig_priority == WORKQUEUE_BG_PRIOQUEUE) || (priority == WORKQUEUE_BG_PRIOQUEUE)) {
1910 if (priority == WORKQUEUE_BG_PRIOQUEUE) {
1922 precedinfo.importance = workqueue_importance[priority];
1926 KERNEL_DEBUG(0xefffd120 | DBG_FUNC_END, wq, priority, policy, 0, 0);
1959 wq_runreq(p, overcommit, priority, th_to_run, tl, reuse_thread, wake_thread, (thread == th_to_run));
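
Lines 1830-1959 show workqueue_run_nextreq() binding the chosen thread to its (priority, affinity) slot: the per-slot scheduled and active counters are bumped, the pending request count is decremented, and when a reused thread changes priority its importance and scheduling policy are re-derived from per-priority tables (workqueue_importance[] and workqueue_policy[], lines 1897 and 1900), with the background level WORKQUEUE_BG_PRIOQUEUE treated specially (lines 1904-1922). The model below sketches that hand-off under simplified, hypothetical types and table contents; the matching decrements of the thread's old slot sit in source lines the refs:priority search does not show.

#include <stdbool.h>
#include <stdint.h>

#define NUMPRIOS    4                 /* stand-in for WORKQUEUE_NUMPRIOS */
#define MAXAFFINITY 8                 /* arbitrary cap for the model */
#define BG_PRIO     (NUMPRIOS - 1)    /* stand-in for WORKQUEUE_BG_PRIOQUEUE */

/* hypothetical per-priority importance values; the kernel keeps real
 * tables named workqueue_importance[] (line 1897) and
 * workqueue_policy[] (line 1900) */
static const int model_importance[NUMPRIOS] = { 2, 0, -2, -4 };

struct model_thread {
    uint32_t priority;     /* mirrors tl->th_priority (line 1854) */
    uint32_t affinity;
    int      importance;
    bool     background;
};

struct model_counts {
    uint32_t active[NUMPRIOS][MAXAFFINITY];      /* wq_thactive_count[][] */
    uint32_t scheduled[NUMPRIOS][MAXAFFINITY];   /* wq_thscheduled_count[][] */
    uint32_t requests[NUMPRIOS];                 /* wq_requests[] */
};

/*
 * Re-bind a reused thread to a new (priority, affinity) slot: move its
 * contribution between the per-slot counters and refresh its importance
 * and background disposition from the per-priority table.
 */
static void
model_rebind(struct model_counts *c, struct model_thread *t,
             uint32_t priority, uint32_t affinity)
{
    if (t->priority != priority || t->affinity != affinity) {   /* line 1857 */
        /* old-slot decrements; the new-slot increments are the ones
         * visible at lines 1863 and 1866 */
        c->active[t->priority][t->affinity]--;
        c->scheduled[t->priority][t->affinity]--;
        c->active[priority][affinity]++;
        c->scheduled[priority][affinity]++;
    }
    c->requests[priority]--;                                     /* line 1839 */

    if (t->priority != priority) {                               /* line 1889 */
        /* the kernel applies workqueue_importance[priority] via
         * proc_setthread_saved_importance() (line 1897) and may switch
         * scheduling policy when moving to or from the background
         * level (lines 1904-1922) */
        t->importance = model_importance[priority];
        t->background = (priority == BG_PRIO);
    }
    t->priority = priority;                                      /* line 1854 */
    t->affinity = affinity;
}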
2174 wq_runreq(proc_t p, boolean_t overcommit, uint32_t priority, thread_t th, struct threadlist *tl,
2182 ret = setup_wqthread(p, th, overcommit, priority, reuse_thread, tl);
2227 setup_wqthread(proc_t p, thread_t th, boolean_t overcommit, uint32_t priority, int reuse_thread, struct threadlist *tl)
2234 flags |= priority;
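
Lines 2174-2234 are the hand-off into the thread itself: wq_runreq() calls setup_wqthread(), which builds the flags word the new or reused thread receives, OR'ing the priority into it (line 2234) alongside indications such as overcommit and thread reuse. The sketch below uses hypothetical flag values; the real bit layout is defined in the workqueue headers and is not reproduced here.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical bit layout: low bits carry the priority, higher bits
 * carry boolean indications. */
#define MODEL_FLAG_OVERCOMMIT  0x00010000u
#define MODEL_FLAG_REUSE       0x00020000u

/* Model of setup_wqthread()'s flag construction (line 2234: flags |= priority). */
static uint32_t
model_thread_flags(uint32_t priority, bool overcommit, bool reuse_thread)
{
    uint32_t flags = 0;

    flags |= priority;                      /* line 2234 */
    if (overcommit)
        flags |= MODEL_FLAG_OVERCOMMIT;
    if (reuse_thread)
        flags |= MODEL_FLAG_REUSE;

    return flags;    /* delivered to the thread's user-space entry point */
}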