Side-by-side diff of sys/kern/sched_ule.c between FreeBSD revisions
121896 (old / "Deleted" column) and 121923 (new / "Added" column),
as rendered by a web diff viewer ("full" / "compact" are view modes).
sched_ule.c (121896) sched_ule.c (121923)
1/*-
2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121896 2003-11-02 10:56:48Z jeff $");
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121923 2003-11-03 03:27:22Z jeff $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/ktr.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/proc.h>

--- 173 unchanged lines hidden (view full) ---

210 struct runq *ksq_next; /* Next timeshare queue. */
211 struct runq *ksq_curr; /* Current queue. */
212 int ksq_load_timeshare; /* Load for timeshare. */
213 int ksq_load; /* Aggregate load. */
214 short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
215 short ksq_nicemin; /* Least nice. */
216#ifdef SMP
217 int ksq_load_transferable; /* kses that may be migrated. */
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/ktr.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/proc.h>

--- 173 unchanged lines hidden (view full) ---

210 struct runq *ksq_next; /* Next timeshare queue. */
211 struct runq *ksq_curr; /* Current queue. */
212 int ksq_load_timeshare; /* Load for timeshare. */
213 int ksq_load; /* Aggregate load. */
214 short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
215 short ksq_nicemin; /* Least nice. */
216#ifdef SMP
217 int ksq_load_transferable; /* kses that may be migrated. */
218 int ksq_idled;
218 unsigned int ksq_rslices; /* Slices on run queue */
219 int ksq_cpus; /* Count of CPUs in this kseq. */
220 struct kse *ksq_assigned; /* KSEs assigned by another CPU. */
221#endif
222};
223
224/*
225 * One kse queue per processor.

--- 25 unchanged lines hidden (view full) ---

251static void kseq_nice_add(struct kseq *kseq, int nice);
252static void kseq_nice_rem(struct kseq *kseq, int nice);
253void kseq_print(int cpu);
254#ifdef SMP
255#if 0
256static int sched_pickcpu(void);
257#endif
258static struct kse *runq_steal(struct runq *rq);
219 unsigned int ksq_rslices; /* Slices on run queue */
220 int ksq_cpus; /* Count of CPUs in this kseq. */
221 struct kse *ksq_assigned; /* KSEs assigned by another CPU. */
222#endif
223};
224
225/*
226 * One kse queue per processor.

--- 25 unchanged lines hidden (view full) ---

252static void kseq_nice_add(struct kseq *kseq, int nice);
253static void kseq_nice_rem(struct kseq *kseq, int nice);
254void kseq_print(int cpu);
255#ifdef SMP
256#if 0
257static int sched_pickcpu(void);
258#endif
259static struct kse *runq_steal(struct runq *rq);
259static struct kseq *kseq_load_highest(void);
260static void kseq_balance(void *arg);
261static void kseq_move(struct kseq *from, int cpu);
260static void kseq_balance(void *arg);
261static void kseq_move(struct kseq *from, int cpu);
262static int kseq_find(void);
262static __inline void kseq_setidle(struct kseq *kseq);
263static void kseq_notify(struct kse *ke, int cpu);
264static void kseq_assign(struct kseq *);
265static struct kse *kseq_steal(struct kseq *kseq);
266#define KSE_CAN_MIGRATE(ke, class) ((class) != PRI_ITHD)
267#endif
268
269void
270kseq_print(int cpu)

--- 172 unchanged lines hidden (view full) ---

443
444out:
445 mtx_unlock_spin(&sched_lock);
446 callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);
447
448 return;
449}
450
263static void kseq_notify(struct kse *ke, int cpu);
264static void kseq_assign(struct kseq *);
265static struct kse *kseq_steal(struct kseq *kseq);
266#define KSE_CAN_MIGRATE(ke, class) ((class) != PRI_ITHD)
267#endif
268
269void
270kseq_print(int cpu)

--- 172 unchanged lines hidden (view full) ---

443
444out:
445 mtx_unlock_spin(&sched_lock);
446 callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);
447
448 return;
449}
450
/*
 * kseq_load_highest() -- scan every present, running CPU and return the
 * per-CPU kse queue carrying the highest aggregate load (ksq_load), or
 * NULL when the busiest queue has no surplus of migratable KSEs.  This
 * function exists only in the old revision (121896); rev 121923 removes
 * it (see the deleted prototype earlier in the diff).
 *
 * Locking: sched_lock must be held by the caller (asserted below).
 */
451static struct kseq *
452kseq_load_highest(void)
453{
 454 struct kseq *kseq;
 455 int load;	/* highest ksq_load observed so far */
 456 int cpu;	/* CPU that owns that load */
 457 int i;
 458
 459 mtx_assert(&sched_lock, MA_OWNED);
 460 cpu = 0;
 461 load = 0;
 462
 463 for (i = 0; i < mp_maxid; i++) {
/*
 * NOTE(review): "(i & stopped_cpus)" tests the loop INDEX against the
 * stopped-CPU mask; masking out a stopped CPU would conventionally be
 * "((1 << i) & stopped_cpus)" -- confirm against sys/smp.h cpumask
 * conventions.
 */
 464 if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
 465 continue;
 466 kseq = KSEQ_CPU(i);
 467 if (kseq->ksq_load > load) {
 468 load = kseq->ksq_load;
 469 cpu = i;
 470 }
 471 }
 472 kseq = KSEQ_CPU(cpu);
 473
/*
 * The busiest queue is only a useful donor if it holds more
 * transferable KSEs than it has CPUs to run them on; otherwise
 * report that there is nothing worth stealing.
 */
 474 if (kseq->ksq_load_transferable > kseq->ksq_cpus)
 475 return (kseq);
 476
 477 return (NULL);
 478}
479
/*
 * kseq_move() -- steal one migratable kse from the 'from' queue and
 * re-home it on CPU 'cpu'.  The diff viewer interleaves BOTH revisions
 * of this function below: old rev 121896 (embedded lines 480-493) and
 * new rev 121923 (embedded lines 451-464).  The bodies differ only in
 * the final hand-off: the old revision re-adds the thread locally via
 * sched_add(), the new revision posts the kse to the destination CPU
 * with kseq_notify().
 */
480static void
481kseq_move(struct kseq *from, int cpu)
482{
 483 struct kse *ke;
 484
 485 ke = kseq_steal(from);
 486 runq_remove(ke->ke_runq, ke);
 487 ke->ke_state = KES_THREAD;
 488 kseq_rem(from, ke);
 489
 490 ke->ke_cpu = cpu;
/* New-revision (121923) copy of the identical prologue: */
451static void
452kseq_move(struct kseq *from, int cpu)
453{
 454 struct kse *ke;
 455
 456 ke = kseq_steal(from);
 457 runq_remove(ke->ke_runq, ke);
 458 ke->ke_state = KES_THREAD;
 459 kseq_rem(from, ke);
 460
 461 ke->ke_cpu = cpu;
/* Old revision: requeue through the scheduler entry point. */
 491 sched_add(ke->ke_thread);
/* New revision: signal the target CPU to collect the kse itself. */
 462 kseq_notify(ke, cpu);
492}
493
463}
464
/*
 * Two routines interleaved here by the diff viewer:
 *
 *  kseq_find() -- old rev 121896 only (embedded lines 494-522, removed
 *  in 121923).  Returns 1 after pulling work off the most loaded remote
 *  queue onto this CPU, 0 when there is nothing to steal (in which case
 *  it advertises this CPU in the global kseq_idle mask).
 *
 *  kseq_setidle() -- new in rev 121923 (embedded lines 465-472; its
 *  closing brace "473}" appears further down in the diff).  Lighter
 *  replacement that only marks the queue idle (ksq_idled) and sets this
 *  CPU's bit in kseq_idle; idle work-pulling moved elsewhere.
 */
494static int
495kseq_find(void)
465static __inline void
466kseq_setidle(struct kseq *kseq)
496{
467{
 497 struct kseq *high;
 498
/* Old kseq_find() body: nothing to do before SMP is started, or if
 * this CPU is already advertised as idle. */
 499 if (!smp_started)
 500 return (0);
 501 if (kseq_idle & PCPU_GET(cpumask))
 502 return (0);
 503 /*
 504 * Find the cpu with the highest load and steal one proc.
 505 */
 506 if ((high = kseq_load_highest()) == NULL ||
 507 high == KSEQ_SELF()) {
 508 /*
 509 * If we couldn't find one, set ourselves in the
 510 * idle map.
 511 */
 512 atomic_set_int(&kseq_idle, PCPU_GET(cpumask));
 513 return (0);
 514 }
 515 /*
 516 * Remove this kse from this kseq and runq and then requeue
 517 * on the current processor. We now have a load of one!
 518 */
 519 kseq_move(high, PCPU_GET(cpuid));
 520
 521 return (1);
/* New kseq_setidle() body: idempotent -- bail if already marked. */
 468 if (kseq->ksq_idled)
 469 return;
 470 kseq->ksq_idled = 1;
 471 atomic_set_int(&kseq_idle, PCPU_GET(cpumask));
 472 return;
522}
523
524static void
525kseq_assign(struct kseq *kseq)
526{
527 struct kse *nke;
528 struct kse *ke;
529

--- 126 unchanged lines hidden (view full) ---

656 runq_init(&kseq->ksq_idle);
657 kseq->ksq_curr = &kseq->ksq_timeshare[0];
658 kseq->ksq_next = &kseq->ksq_timeshare[1];
659 kseq->ksq_load = 0;
660 kseq->ksq_load_timeshare = 0;
661#ifdef SMP
662 kseq->ksq_load_transferable = 0;
663 kseq->ksq_rslices = 0;
473}
474
475static void
476kseq_assign(struct kseq *kseq)
477{
478 struct kse *nke;
479 struct kse *ke;
480

--- 126 unchanged lines hidden (view full) ---

607 runq_init(&kseq->ksq_idle);
608 kseq->ksq_curr = &kseq->ksq_timeshare[0];
609 kseq->ksq_next = &kseq->ksq_timeshare[1];
610 kseq->ksq_load = 0;
611 kseq->ksq_load_timeshare = 0;
612#ifdef SMP
613 kseq->ksq_load_transferable = 0;
614 kseq->ksq_rslices = 0;
615 kseq->ksq_idled = 0;
664 kseq->ksq_assigned = NULL;
665#endif
666}
667
668static void
669sched_setup(void *dummy)
670{
671#ifdef SMP

--- 647 unchanged lines hidden (view full) ---

1319sched_choose(void)
1320{
1321 struct kseq *kseq;
1322 struct kse *ke;
1323
1324 mtx_assert(&sched_lock, MA_OWNED);
1325 kseq = KSEQ_SELF();
1326#ifdef SMP
616 kseq->ksq_assigned = NULL;
617#endif
618}
619
620static void
621sched_setup(void *dummy)
622{
623#ifdef SMP

--- 647 unchanged lines hidden (view full) ---

1271sched_choose(void)
1272{
1273 struct kseq *kseq;
1274 struct kse *ke;
1275
1276 mtx_assert(&sched_lock, MA_OWNED);
1277 kseq = KSEQ_SELF();
1278#ifdef SMP
1327retry:
1328 if (kseq->ksq_assigned)
1329 kseq_assign(kseq);
1330#endif
1331 ke = kseq_choose(kseq);
1332 if (ke) {
1333#ifdef SMP
1334 if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1279 if (kseq->ksq_assigned)
1280 kseq_assign(kseq);
1281#endif
1282 ke = kseq_choose(kseq);
1283 if (ke) {
1284#ifdef SMP
1285 if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1335 if (kseq_find())
1336 goto retry;
1286 kseq_setidle(kseq);
1337#endif
1338 runq_remove(ke->ke_runq, ke);
1339 ke->ke_state = KES_THREAD;
1340
1341 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1342 CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1343 ke, ke->ke_runq, ke->ke_slice,
1344 ke->ke_thread->td_priority);
1345 }
1346 return (ke);
1347 }
1348#ifdef SMP
1287#endif
1288 runq_remove(ke->ke_runq, ke);
1289 ke->ke_state = KES_THREAD;
1290
1291 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1292 CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1293 ke, ke->ke_runq, ke->ke_slice,
1294 ke->ke_thread->td_priority);
1295 }
1296 return (ke);
1297 }
1298#ifdef SMP
1349 if (kseq_find())
1350 goto retry;
1299 kseq_setidle(kseq);
1351#endif
1300#endif
1352
1353 return (NULL);
1354}
1355
1356void
1357sched_add(struct thread *td)
1358{
1359 struct kseq *kseq;
1360 struct ksegrp *kg;

--- 74 unchanged lines hidden (view full) ---

1435 cpu--;
1436 atomic_clear_int(&kseq_idle, 1 << cpu);
1437 ke->ke_cpu = cpu;
1438 ke->ke_runq = NULL;
1439 kseq_notify(ke, cpu);
1440 return;
1441 }
1442 }
1301 return (NULL);
1302}
1303
1304void
1305sched_add(struct thread *td)
1306{
1307 struct kseq *kseq;
1308 struct ksegrp *kg;

--- 74 unchanged lines hidden (view full) ---

1383 cpu--;
1384 atomic_clear_int(&kseq_idle, 1 << cpu);
1385 ke->ke_cpu = cpu;
1386 ke->ke_runq = NULL;
1387 kseq_notify(ke, cpu);
1388 return;
1389 }
1390 }
1443 if (class == PRI_TIMESHARE || class == PRI_REALTIME)
1391 if (kseq->ksq_idled &&
1392 (class == PRI_TIMESHARE || class == PRI_REALTIME)) {
1444 atomic_clear_int(&kseq_idle, PCPU_GET(cpumask));
1393 atomic_clear_int(&kseq_idle, PCPU_GET(cpumask));
1394 kseq->ksq_idled = 0;
1395 }
1445#endif
1446 if (td->td_priority < curthread->td_priority)
1447 curthread->td_flags |= TDF_NEEDRESCHED;
1448
1449 ke->ke_ksegrp->kg_runq_kses++;
1450 ke->ke_state = KES_ONRUNQ;
1451
1452 runq_add(ke->ke_runq, ke);

--- 84 unchanged lines hidden ---
1396#endif
1397 if (td->td_priority < curthread->td_priority)
1398 curthread->td_flags |= TDF_NEEDRESCHED;
1399
1400 ke->ke_ksegrp->kg_runq_kses++;
1401 ke->ke_state = KES_ONRUNQ;
1402
1403 runq_add(ke->ke_runq, ke);

--- 84 unchanged lines hidden ---