sched_ule.c: Deleted (revision 121872) vs. Added (revision 121896)
1/*-
2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121872 2003-11-02 04:25:59Z jeff $");
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121896 2003-11-02 10:56:48Z jeff $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/ktr.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/proc.h>

--- 167 unchanged lines hidden ---

204
205#define KSEQ_NCLASS (PRI_IDLE + 1) /* Number of run classes. */
206
207struct kseq {
208 struct runq ksq_idle; /* Queue of IDLE threads. */
209 struct runq ksq_timeshare[2]; /* Run queues for !IDLE. */
210 struct runq *ksq_next; /* Next timeshare queue. */
211 struct runq *ksq_curr; /* Current queue. */
212 int ksq_loads[KSEQ_NCLASS]; /* Load for each class */
212 int ksq_load_timeshare; /* Load for timeshare. */
213 int ksq_load; /* Aggregate load. */
214 short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
215 short ksq_nicemin; /* Least nice. */
216#ifdef SMP
217 int ksq_load_transferable; /* kses that may be migrated. */
217 unsigned int ksq_rslices; /* Slices on run queue */
218 int ksq_cpus; /* Count of CPUs in this kseq. */
219 struct kse *ksq_assigned; /* KSEs assigned by another CPU. */
220#endif
221};
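
The hunk above replaces the per-class ksq_loads[] array with dedicated counters: ksq_load_timeshare for timeshare KSEs and, under SMP, ksq_load_transferable for KSEs that may move between CPUs. A minimal declaration-only sketch of the resulting bookkeeping fields follows; the field names and comments mirror the diff, but the struct itself is illustrative and omits the other kseq members, so it is not the kernel's struct kseq.

/*
 * Illustrative model of the reworked load bookkeeping kept per kseq.
 * One aggregate count, one count of timeshare KSEs, and (SMP only) one
 * count of KSEs that KSE_CAN_MIGRATE() would allow to change CPUs.
 */
struct kseq_load_fields {
	int	ksq_load;		/* every runnable KSE on this queue */
	int	ksq_load_timeshare;	/* only PRI_TIMESHARE KSEs */
#ifdef SMP
	int	ksq_load_transferable;	/* KSEs that may be migrated */
#endif
};

Keeping these as incrementally maintained counters avoids summing an array (or several class slots) on the hot enqueue, dequeue and balancing paths.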
222
223/*
224 * One kse queue per processor.

--- 32 unchanged lines hidden ---

257static struct kse *runq_steal(struct runq *rq);
258static struct kseq *kseq_load_highest(void);
259static void kseq_balance(void *arg);
260static void kseq_move(struct kseq *from, int cpu);
261static int kseq_find(void);
262static void kseq_notify(struct kse *ke, int cpu);
263static void kseq_assign(struct kseq *);
264static struct kse *kseq_steal(struct kseq *kseq);
266#define KSE_CAN_MIGRATE(ke, class) ((class) != PRI_ITHD)
265#endif
266
267void
268kseq_print(int cpu)
269{
270 struct kseq *kseq;
271 int i;
272
273 kseq = KSEQ_CPU(cpu);
274
275 printf("kseq:\n");
276 printf("\tload: %d\n", kseq->ksq_load);
277 printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
278 printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
279 printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
280 printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
279 printf("\tload REALTIME: %d\n", kseq->ksq_load_timeshare);
280#ifdef SMP
281 printf("\tload transferable: %d\n", kseq->ksq_load_transferable);
282#endif
281 printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
282 printf("\tnice counts:\n");
283 for (i = 0; i < SCHED_PRI_NRESV; i++)
284 if (kseq->ksq_nice[i])
285 printf("\t\t%d = %d\n",
286 i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
287}
288
289static void
290kseq_add(struct kseq *kseq, struct kse *ke)
291{
294 int class;
292 mtx_assert(&sched_lock, MA_OWNED);
293 kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
296 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
297 if (class == PRI_TIMESHARE)
298 kseq->ksq_load_timeshare++;
299#ifdef SMP
300 if (KSE_CAN_MIGRATE(ke, class))
301 kseq->ksq_load_transferable++;
302 kseq->ksq_rslices += ke->ke_slice;
303#endif
294 kseq->ksq_load++;
295 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
296 CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
297 ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
298 ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
299 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
300 kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
301#ifdef SMP
302 kseq->ksq_rslices += ke->ke_slice;
303#endif
304}
305
306static void
307kseq_rem(struct kseq *kseq, struct kse *ke)
308{
316 int class;
309 mtx_assert(&sched_lock, MA_OWNED);
310 kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
318 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
319 if (class == PRI_TIMESHARE)
320 kseq->ksq_load_timeshare--;
321#ifdef SMP
322 if (KSE_CAN_MIGRATE(ke, class))
323 kseq->ksq_load_transferable--;
324 kseq->ksq_rslices -= ke->ke_slice;
325#endif
311 kseq->ksq_load--;
312 ke->ke_runq = NULL;
313 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
314 kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
315#ifdef SMP
316 kseq->ksq_rslices -= ke->ke_slice;
317#endif
318}
319
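
kseq_add() and kseq_rem() above keep the new counters symmetric: each enqueue classifies the KSE once via PRI_BASE(), bumps ksq_load_timeshare only for PRI_TIMESHARE groups and, under SMP, bumps ksq_load_transferable whenever KSE_CAN_MIGRATE() accepts the class (everything except interrupt threads), while kseq_rem() undoes exactly the same increments. Below is a compilable userland sketch of that symmetry; the enum values are hypothetical stand-ins for the kernel's PRI_* constants and the struct has no relation to the real struct kse.

#include <assert.h>

/* Hypothetical stand-ins for the kernel's priority classes. */
enum pri_class { PRI_ITHD_M, PRI_REALTIME_M, PRI_TIMESHARE_M, PRI_IDLE_M };

struct loads { int load, load_timeshare, load_transferable; };

/* Mirrors KSE_CAN_MIGRATE(): interrupt threads never change CPUs. */
static int can_migrate(enum pri_class c) { return (c != PRI_ITHD_M); }

static void
model_add(struct loads *l, enum pri_class c)
{
	l->load++;
	if (c == PRI_TIMESHARE_M)
		l->load_timeshare++;
	if (can_migrate(c))
		l->load_transferable++;
}

static void
model_rem(struct loads *l, enum pri_class c)
{
	l->load--;
	if (c == PRI_TIMESHARE_M)
		l->load_timeshare--;
	if (can_migrate(c))
		l->load_transferable--;
}

int main(void)
{
	struct loads l = { 0, 0, 0 };

	model_add(&l, PRI_TIMESHARE_M);
	model_add(&l, PRI_ITHD_M);
	assert(l.load == 2 && l.load_timeshare == 1 && l.load_transferable == 1);
	model_rem(&l, PRI_ITHD_M);
	model_rem(&l, PRI_TIMESHARE_M);
	assert(l.load == 0 && l.load_timeshare == 0 && l.load_transferable == 0);
	return (0);
}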
320static void
321kseq_nice_add(struct kseq *kseq, int nice)
322{
323 mtx_assert(&sched_lock, MA_OWNED);
324 /* Normalize to zero. */
325 kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
326 if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
338 if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
327 kseq->ksq_nicemin = nice;
328}
329
330static void
331kseq_nice_rem(struct kseq *kseq, int nice)
332{
333 int n;
334

--- 5 unchanged lines hidden ---

340
341 /*
342 * If this wasn't the smallest nice value or there are more in
343 * this bucket we can just return. Otherwise we have to recalculate
344 * the smallest nice.
345 */
346 if (nice != kseq->ksq_nicemin ||
347 kseq->ksq_nice[n] != 0 ||
348 kseq->ksq_loads[PRI_TIMESHARE] == 0)
360 kseq->ksq_load_timeshare == 0)
349 return;
350
351 for (; n < SCHED_PRI_NRESV; n++)
352 if (kseq->ksq_nice[n]) {
353 kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
354 return;
355 }
356}
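
kseq_nice_add() and kseq_nice_rem() maintain a histogram of nice values plus ksq_nicemin, the least nice value currently queued; the only change in this revision is that the "any timeshare load at all?" test now reads ksq_load_timeshare instead of the old array slot. The following toy model shows the rescan for a new minimum after the bucket at the old minimum empties; NHALF and NRESV here are illustrative values standing in for SCHED_PRI_NHALF and SCHED_PRI_NRESV, which are defined in a hidden part of this file.

#include <stdio.h>

#define NHALF	20
#define NRESV	(NHALF * 2 + 1)		/* nice -20 .. +20 */

static short nice_bin[NRESV];		/* count of KSEs at each nice value */
static short nicemin;			/* least nice value present */

/* After the bin at the old minimum empties, scan upward for the next one. */
static void
recalc_nicemin(void)
{
	int n;

	for (n = nicemin + NHALF; n < NRESV; n++)
		if (nice_bin[n]) {
			nicemin = n - NHALF;
			return;
		}
}

int main(void)
{
	nice_bin[-5 + NHALF] = 1;	/* one KSE at nice -5 */
	nice_bin[0 + NHALF] = 2;	/* two KSEs at nice 0 */
	nicemin = -5;

	nice_bin[-5 + NHALF]--;		/* the nice -5 KSE leaves the queue */
	recalc_nicemin();
	printf("new nicemin: %d\n", nicemin);	/* prints 0 */
	return (0);
}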

--- 47 unchanged lines hidden ---

404 if (low_load == -1 || kseq->ksq_load < low_load) {
405 low_load = kseq->ksq_load;
406 low_cpu = i;
407 }
408 }
409
410 kseq = KSEQ_CPU(high_cpu);
411
412 high_load = kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
413 kseq->ksq_loads[PRI_REALTIME];
424 high_load = kseq->ksq_load_transferable;
414 /*
415 * Nothing to do.
416 */
417 if (high_load < kseq->ksq_cpus + 1)
418 goto out;
419
420 high_load -= kseq->ksq_cpus;
421

--- 33 unchanged lines hidden ---

455 kseq = KSEQ_CPU(i);
456 if (kseq->ksq_load > load) {
457 load = kseq->ksq_load;
458 cpu = i;
459 }
460 }
461 kseq = KSEQ_CPU(cpu);
462
463 if ((kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
464 kseq->ksq_loads[PRI_REALTIME]) > kseq->ksq_cpus)
474 if (kseq->ksq_load_transferable > kseq->ksq_cpus)
465 return (kseq);
466
467 return (NULL);
468}
469
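
With the transferable counter available, kseq_balance() and kseq_load_highest() above stop summing the idle, timeshare and realtime class loads and compare ksq_load_transferable against ksq_cpus directly; because KSE_CAN_MIGRATE() rejects only interrupt threads, the counter carries the same information, maintained incrementally instead of being recomputed at every balance decision. A small stand-in for that check, where the field names follow the diff but the struct is illustrative:

/* Illustrative stand-in for the two fields the balancer compares. */
struct kseq_balance_view {
	int	ksq_load_transferable;	/* migratable KSEs on this queue */
	int	ksq_cpus;		/* logical CPUs sharing this queue */
};

/*
 * A queue is worth stealing from only when it has more migratable work
 * than it has logical CPUs to run that work on.
 */
static int
has_excess_load(const struct kseq_balance_view *k)
{
	return (k->ksq_load_transferable > k->ksq_cpus);
}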
470static void
471kseq_move(struct kseq *from, int cpu)
472{

--- 94 unchanged lines hidden ---

567 for (word = 0; word < RQB_LEN; word++) {
568 if (rqb->rqb_bits[word] == 0)
569 continue;
570 for (bit = 0; bit < RQB_BPW; bit++) {
571 if ((rqb->rqb_bits[word] & (1 << bit)) == 0)
572 continue;
573 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
574 TAILQ_FOREACH(ke, rqh, ke_procq) {
575 if (PRI_BASE(ke->ke_ksegrp->kg_pri_class) !=
576 PRI_ITHD)
585 if (KSE_CAN_MIGRATE(ke,
586 PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
577 return (ke);
578 }
579 }
580 }
581 return (NULL);
582}
583
584static struct kse *

--- 54 unchanged lines hidden ---

639}
640
641static void
642kseq_setup(struct kseq *kseq)
643{
644 runq_init(&kseq->ksq_timeshare[0]);
645 runq_init(&kseq->ksq_timeshare[1]);
646 runq_init(&kseq->ksq_idle);
647
648 kseq->ksq_curr = &kseq->ksq_timeshare[0];
649 kseq->ksq_next = &kseq->ksq_timeshare[1];
650
651 kseq->ksq_loads[PRI_ITHD] = 0;
652 kseq->ksq_loads[PRI_REALTIME] = 0;
653 kseq->ksq_loads[PRI_TIMESHARE] = 0;
654 kseq->ksq_loads[PRI_IDLE] = 0;
655 kseq->ksq_load = 0;
659 kseq->ksq_load = 0;
660 kseq->ksq_load_timeshare = 0;
656#ifdef SMP
662 kseq->ksq_load_transferable = 0;
657 kseq->ksq_rslices = 0;
658 kseq->ksq_assigned = NULL;
659#endif
660}
661
662static void
663sched_setup(void *dummy)
664{

--- 103 unchanged lines hidden ---

768 * run queue. The exception to this is nice 0 ksegs when
769 * a nice -20 is running. They are always granted a minimum
770 * slice.
771 */
772 if (!SCHED_INTERACTIVE(kg)) {
773 int nice;
774
775 nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
776 if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
782 if (kseq->ksq_load_timeshare == 0 ||
777 kg->kg_nice < kseq->ksq_nicemin)
778 ke->ke_slice = SCHED_SLICE_MAX;
779 else if (nice <= SCHED_SLICE_NTHRESH)
780 ke->ke_slice = SCHED_SLICE_NICE(nice);
781 else if (kg->kg_nice == 0)
782 ke->ke_slice = SCHED_SLICE_MIN;
783 else
784 ke->ke_slice = 0;
785 } else
786 ke->ke_slice = SCHED_SLICE_MIN;
787
788 CTR6(KTR_ULE,
789 "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
790 ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
791 kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));
797 kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
792
793 return;
794}
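
The slice assignment shown above sizes a non-interactive KSE's slice from how far its nice value sits above the queue's ksq_nicemin, with the documented exception that a nice 0 kseg still receives a minimum slice even when a nice -20 thread pushes it past the threshold; this revision only swaps ksq_loads[PRI_TIMESHARE] for ksq_load_timeshare in the "no timeshare load" test. The toy model below reproduces that decision tree using assumed slice bounds and an assumed linear SLICE_NICE() mapping; the kernel's SCHED_SLICE_* macros are defined in the hidden part of this file and may differ.

#include <stdio.h>

#define SLICE_MAX	140
#define SLICE_MIN	10
#define SLICE_NTHRESH	18
#define SLICE_NICE(n)	(SLICE_MAX - ((SLICE_MAX - SLICE_MIN) / SLICE_NTHRESH) * (n))

static int
model_slice(int interactive, int load_timeshare, int nice, int nicemin)
{
	int off;

	if (interactive)
		return (SLICE_MIN);	/* interactive threads keep a short slice */
	off = nice - nicemin;		/* distance from the queue's best nice */
	if (load_timeshare == 0 || nice < nicemin)
		return (SLICE_MAX);
	if (off <= SLICE_NTHRESH)
		return (SLICE_NICE(off));
	if (nice == 0)
		return (SLICE_MIN);	/* the documented nice 0 exception */
	return (0);			/* heavily niced threads get no slice */
}

int main(void)
{
	/* A nice 0 thread sharing the queue with a nice -20 thread. */
	printf("slice: %d\n", model_slice(0, 2, 0, -20));	/* prints 10 */
	return (0);
}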
795
796/*
797 * This routine enforces a maximum limit on the amount of scheduling history
798 * kept. It is called after either the slptime or runtime is adjusted.
799 * This routine will not operate correctly when slp or run times have been

--- 320 unchanged lines hidden ---

1120{
1121}
1122
1123void
1124sched_class(struct ksegrp *kg, int class)
1125{
1126 struct kseq *kseq;
1127 struct kse *ke;
1134 int nclass;
1135 int oclass;
1128
1129 mtx_assert(&sched_lock, MA_OWNED);
1130 if (kg->kg_pri_class == class)
1131 return;
1132
1141 nclass = PRI_BASE(class);
1142 oclass = PRI_BASE(kg->kg_pri_class);
1133 FOREACH_KSE_IN_GROUP(kg, ke) {
1134 if (ke->ke_state != KES_ONRUNQ &&
1135 ke->ke_state != KES_THREAD)
1136 continue;
1137 kseq = KSEQ_CPU(ke->ke_cpu);
1138
1139 kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
1140 kseq->ksq_loads[PRI_BASE(class)]++;
1149#ifdef SMP
1150 if (KSE_CAN_MIGRATE(ke, oclass))
1151 kseq->ksq_load_transferable--;
1152 if (KSE_CAN_MIGRATE(ke, nclass))
1153 kseq->ksq_load_transferable++;
1154#endif
1155 if (oclass == PRI_TIMESHARE)
1156 kseq->ksq_load_timeshare--;
1157 if (nclass == PRI_TIMESHARE)
1158 kseq->ksq_load_timeshare++;
1141
1142 if (kg->kg_pri_class == PRI_TIMESHARE)
1143 kseq_nice_rem(kseq, kg->kg_nice);
1144 else if (class == PRI_TIMESHARE)
1145 kseq_nice_add(kseq, kg->kg_nice);
1146 }
1147
1148 kg->kg_pri_class = class;
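
sched_class() above re-homes a KSE's contribution when only its priority class changes: the transferable count drops if the old class could migrate and rises if the new one can, the timeshare count moves the same way, and the aggregate ksq_load stays put because the KSE never leaves the queue. A self-contained sketch of that re-accounting, again with hypothetical class names rather than the kernel's PRI_* constants:

#include <assert.h>

/* Hypothetical classes; only the interrupt-thread class is pinned. */
enum cls { CLS_ITHD, CLS_REALTIME, CLS_TIMESHARE, CLS_IDLE };

struct counters { int timeshare, transferable; };

/* Move one KSE's contribution from its old class to its new one. */
static void
reclass(struct counters *c, enum cls oclass, enum cls nclass)
{
	if (oclass != CLS_ITHD)
		c->transferable--;
	if (nclass != CLS_ITHD)
		c->transferable++;
	if (oclass == CLS_TIMESHARE)
		c->timeshare--;
	if (nclass == CLS_TIMESHARE)
		c->timeshare++;
}

int main(void)
{
	struct counters c = { 1, 1 };	/* one timeshare KSE on the queue */

	reclass(&c, CLS_TIMESHARE, CLS_ITHD);	/* it becomes an ithread */
	assert(c.timeshare == 0 && c.transferable == 0);
	return (0);
}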

--- 251 unchanged lines hidden ---

1400 panic("Unknown pri class.");
1401 break;
1402 }
1403#ifdef SMP
1404 /*
1405 * If there are any idle processors, give them our extra load.
1406 */
1407 if (kseq_idle && class != PRI_ITHD &&
1408 (kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
1409 kseq->ksq_loads[PRI_REALTIME]) >= kseq->ksq_cpus) {
1426 kseq->ksq_load_transferable >= kseq->ksq_cpus) {
1410 int cpu;
1411
1412 /*
1413 * Multiple cpus could find this bit simultaneously but the
1414 * race shouldn't be terrible.
1415 */
1416 cpu = ffs(kseq_idle);
1417 if (cpu) {

--- 102 unchanged lines hidden ---
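
The final hunk applies the same substitution in sched_add(): before handing work to an idle CPU found with ffs() on the kseq_idle bitmask, the queue must already have at least as many transferable KSEs as it has CPUs of its own. Below is a small userland model of that hand-off, assuming a plain int bitmask; only the ffs() scan and the threshold mirror the diff, and the rest is illustrative.

#include <stdio.h>
#include <strings.h>	/* ffs() */

/*
 * Toy model of the idle-CPU push: if this queue has surplus migratable
 * KSEs and some CPU advertises itself as idle in the bitmask, pick the
 * lowest-numbered one.  Returns the chosen CPU or -1.
 */
static int
model_pick_idle(unsigned int idle_mask, int transferable, int cpus)
{
	int bit;

	if (transferable < cpus)
		return (-1);		/* no surplus work to give away */
	bit = ffs((int)idle_mask);
	if (bit == 0)
		return (-1);		/* nobody is idle */
	return (bit - 1);		/* ffs() is 1-based */
}

int main(void)
{
	/* CPUs 2 and 5 idle; this queue has 3 transferable KSEs on 1 CPU. */
	printf("push to cpu %d\n", model_pick_idle((1 << 2) | (1 << 5), 3, 1));
	return (0);
}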