kern_switch.c (153510) kern_switch.c (153797)
1/*-
2 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 49 unchanged lines hidden ---

58have its priority compared with the last assigned thread to see if
59it should 'steal' its KSE or not.. i.e. is it 'earlier'
60on the list than that thread or later.. If it's earlier, then the KSE is
61removed from the last assigned (which is now not assigned a KSE)
62and reassigned to the new thread, which is placed earlier in the list.
63The pointer is then backed up to the previous thread (which may or may not
64be the new thread).
65
66When a thread sleeps or is removed, the KSE becomes available and if there
67are queued threads that are not assigned KSEs, the highest priority one of
68them is assigned the KSE, which is then placed back on the run queue at
69the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
70to point to it.
71
72The following diagram shows 2 KSEs and 3 threads from a single process.
73
74 RUNQ: --->KSE---KSE--... (KSEs queued at priorities from threads)
75              \    \____
76               \         \
77     KSEGROUP---thread--thread--thread    (queued in priority order)
78         \                 /
79          \_______________/
80           (last_assigned)
81
82The result of this scheme is that the M available KSEs are always
83queued at the priorities they have inherited from the M highest priority
84threads for that KSEGROUP. If this situation changes, the KSEs are
85reassigned to keep this true.
86***/
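To make the scheme above concrete, here is an editor's sketch; it is not part of kern_switch.c. It reuses names that appear elsewhere in this file (kg_last_assigned, kg_avail_opennings, td_priority, sched_rem(), TAILQ_PREV()); the helper name and the explicit slot bookkeeping are assumptions for illustration, and sched_lock is assumed to be held as in the real code.

    /*
     * Illustrative sketch only: a newly runnable thread td commandeers the
     * KSE slot of kg_last_assigned while its priority is better (numerically
     * lower), and the pointer is backed up one queued thread each time.
     */
    static void
    ksegrp_steal_slot_sketch(struct ksegrp *kg, struct thread *td)
    {
            struct thread *tda = kg->kg_last_assigned;

            while (kg->kg_avail_opennings <= 0 &&
                tda != NULL && tda->td_priority > td->td_priority) {
                    sched_rem(tda);                 /* its KSE leaves the system run queue */
                    kg->kg_avail_opennings++;       /* bookkeeping shown explicitly here */
                    tda = kg->kg_last_assigned =
                        TAILQ_PREV(tda, threadqueue, td_runq);
            }
    }

The opposite transition, described in the "When a thread sleeps or is removed" paragraph above, is visible in remrunqueue() further down; a sketch of the refill step follows that function.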
87
88#include <sys/cdefs.h>
89__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 153510 2005-12-18 18:10:57Z njl $");
89__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 153797 2005-12-28 17:13:31Z kan $");
90
91#include "opt_sched.h"
92
93#ifndef KERN_SWITCH_INCLUDE
94#include <sys/param.h>
95#include <sys/systm.h>
96#include <sys/kdb.h>
97#include <sys/kernel.h>

--- 164 unchanged lines hidden ---

262 CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
263 TD_SET_CAN_RUN(td);
264 /*
265 * If it is not a threaded process, take the shortcut.
266 */
267 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
268 /* remove from sys run queue and free up a slot */
269 sched_rem(td);
270 ke->ke_state = KES_THREAD;
271 return;
272 }
273 td3 = TAILQ_PREV(td, threadqueue, td_runq);
274 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
275 if (ke->ke_state == KES_ONRUNQ) {
276 /*
277 * This thread has been assigned to the system run queue.
278 * We need to dissociate it and try to assign the
279 * KSE to the next available thread. Then, we should
280 * see if we need to move the KSE in the run queues.
281 */
282 sched_rem(td);
283 ke->ke_state = KES_THREAD;
284 td2 = kg->kg_last_assigned;
285 KASSERT((td2 != NULL), ("last assigned has wrong value"));
286 if (td2 == td)
287 kg->kg_last_assigned = td3;
288 /* slot_fill(kg); */ /* will replace it with another */
289 }
290}
291#endif
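The slot_fill(kg) call is left commented out in remrunqueue() above. As a rough editor's sketch of the refill behaviour the header comment describes, and not the verbatim slot_fill() from elsewhere in the file, a freed slot goes to the highest-priority queued thread that has no KSE yet, i.e. the one just after kg_last_assigned, and the pointer advances to it. The helper name, the explicit bookkeeping, and the reuse of the SRQ_BORING flag from the setrunqueue() call shown later are assumptions.

    /*
     * Illustrative sketch only: hand each freed slot to the first queued
     * thread without a KSE and move kg_last_assigned down to it.
     */
    static void
    slot_fill_sketch(struct ksegrp *kg)
    {
            struct thread *td;

            while (kg->kg_avail_opennings > 0) {
                    if (kg->kg_last_assigned != NULL)
                            td = TAILQ_NEXT(kg->kg_last_assigned, td_runq);
                    else
                            td = TAILQ_FIRST(&kg->kg_runq);
                    if (td == NULL)
                            break;          /* every queued thread already has a KSE */
                    kg->kg_last_assigned = td;
                    kg->kg_avail_opennings--;       /* bookkeeping shown explicitly here */
                    sched_add(td, SRQ_BORING);      /* its KSE goes back on the system run queue */
            }
    }

This matches the header comment: the KSE "is then placed back on the run queue at the appropriate place, and the kg->kg_last_assigned pointer is adjusted down to point to it."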
292
293/*
294 * Change the priority of a thread that is on the run queue.
295 */
296void
297adjustrunqueue( struct thread *td, int newpri)
298{
299 struct ksegrp *kg;
300 struct kse *ke;
301
302 mtx_assert(&sched_lock, MA_OWNED);
303 KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
304
305 ke = td->td_kse;

--- 28 unchanged lines hidden ---

334 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
335 TD_SET_CAN_RUN(td);
336 td->td_priority = newpri;
337 setrunqueue(td, SRQ_BORING);
338}
339
340/*
341 * This function is called when a thread is about to be put on a
342 * ksegrp run queue because it has been made runnable or its
343 * priority has been adjusted and the ksegrp does not have a
344 * free kse slot. It determines if a thread from the same ksegrp
345 * should be preempted. If so, it tries to switch threads
346 * if the thread is on the same cpu or notifies another cpu that
347 * it should switch threads.
348 */
349
350static void
351maybe_preempt_in_ksegrp(struct thread *td)
352#if !defined(SMP)
353{
354 struct thread *running_thread;
355

--- 8 unchanged lines hidden ---

364#ifdef PREEMPTION
365#ifndef FULL_PREEMPTION
366 if (td->td_priority > PRI_MAX_ITHD) {
367 running_thread->td_flags |= TDF_NEEDRESCHED;
368 return;
369 }
370#endif /* FULL_PREEMPTION */
371
372 if (running_thread->td_critnest > 1)
373 running_thread->td_owepreempt = 1;
374 else
375 mi_switch(SW_INVOL, NULL);
376
377#else /* PREEMPTION */
378 running_thread->td_flags |= TDF_NEEDRESCHED;
379#endif /* PREEMPTION */
380 return;
381}
382
383#else /* SMP */
384{

--- 10 unchanged lines hidden ---

395 running_thread = curthread;
396
397#if !defined(KSEG_PEEMPT_BEST_CPU)
398 if (running_thread->td_ksegrp != td->td_ksegrp) {
399#endif
400 kg = td->td_ksegrp;
401
402 /* if someone is ahead of this thread, wait our turn */
403 if (td != TAILQ_FIRST(&kg->kg_runq))
404 return;
405
406 worst_pri = td->td_priority;
407 best_pcpu = NULL;
408 dontuse = stopped_cpus | idle_cpus_mask;
409
410 /*
411 * Find a cpu with the worst priority that runs a thread from
412 * the same ksegrp - if multiple exist give first the last run
413 * cpu and then the current cpu priority
414 */
415
416 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
417 cpumask = pc->pc_cpumask;
418 cputhread = pc->pc_curthread;
419
420 if ((cpumask & dontuse) ||
421 cputhread->td_ksegrp != kg)
422 continue;
423
424 if (cputhread->td_priority > worst_pri) {
425 worst_pri = cputhread->td_priority;
426 best_pcpu = pc;
427 continue;
428 }
429
430 if (cputhread->td_priority == worst_pri &&
431 best_pcpu != NULL &&
432 (td->td_lastcpu == pc->pc_cpuid ||
433 (PCPU_GET(cpumask) == cpumask &&
434 td->td_lastcpu != best_pcpu->pc_cpuid)))
435 best_pcpu = pc;
436 }
437
438 /* Check if we need to preempt someone */
439 if (best_pcpu == NULL)
440 return;
441
442#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
443#if !defined(FULL_PREEMPTION)
444 if (td->td_priority <= PRI_MAX_ITHD)
445#endif /* ! FULL_PREEMPTION */
446 {
447 ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
448 return;
449 }
450#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
451
452 if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
453 best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
454 ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
455 return;
456 }
457#if !defined(KSEG_PEEMPT_BEST_CPU)
458 }
459#endif
460
461 if (td->td_priority >= running_thread->td_priority)
462 return;
463#ifdef PREEMPTION
464
465#if !defined(FULL_PREEMPTION)
466 if (td->td_priority > PRI_MAX_ITHD) {
467 running_thread->td_flags |= TDF_NEEDRESCHED;
468 }
469#endif /* ! FULL_PREEMPTION */
470
471 if (running_thread->td_critnest > 1)
472 running_thread->td_owepreempt = 1;
473 else
474 mi_switch(SW_INVOL, NULL);
475
476#else /* PREEMPTION */
477 running_thread->td_flags |= TDF_NEEDRESCHED;
478#endif /* PREEMPTION */
479 return;
480}
481#endif /* !SMP */
482
483

--- 31 unchanged lines hidden ---

515
516 }
517 kg->kg_avail_opennings = 1;
518 }
519 sched_add(td, flags);
520 return;
521 }
522
523 /*
524 * If the concurrency has reduced, and we would go in the
525 * assigned section, then keep removing entries from the
526 * system run queue, until we are not in that section
527 * or there is room for us to be put in that section.
528 * What we MUST avoid is the case where there are threads of less
529 * priority than the new one scheduled, but it can not
530 * be scheduled itself. That would lead to a non contiguous set
531 * of scheduled threads, and everything would break.
532 */
533 tda = kg->kg_last_assigned;
534 while ((kg->kg_avail_opennings <= 0) &&
535 (tda && (tda->td_priority > td->td_priority))) {
536 /*
537 * None free, but there is one we can commandeer.
538 */
539 CTR2(KTR_RUNQ,
540 "setrunqueue: kg:%p: take slot from td: %p", kg, tda);

--- 30 unchanged lines hidden ---

571 * No pre-existing last assigned so whoever is first
572 * gets the slot.. (maybe us)
573 */
574 td2 = TAILQ_FIRST(&kg->kg_runq);
575 kg->kg_last_assigned = td2;
576 } else if (tda->td_priority > td->td_priority) {
577 td2 = td;
578 } else {
579 /*
580 * We are past last_assigned, so
581 * give the next slot to whatever is next,
582 * which may or may not be us.
583 */
584 td2 = TAILQ_NEXT(tda, td_runq);
585 kg->kg_last_assigned = td2;
586 }
587 sched_add(td2, flags);
588 } else {

--- 33 unchanged lines hidden ---

622 mtx_assert(&sched_lock, MA_NOTOWNED);
623 if (td->td_owepreempt) {
624 td->td_critnest = 1;
625 mtx_lock_spin(&sched_lock);
626 td->td_critnest--;
627 mi_switch(SW_INVOL, NULL);
628 mtx_unlock_spin(&sched_lock);
629 }
630 } else
631#endif
632 td->td_critnest--;
633
634
635 CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
636 (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
637}
638
639/*
640 * This function is called when a thread is about to be put on run queue
641 * because it has been made runnable or its priority has been adjusted. It
642 * determines if the new thread should be immediately preempted to. If so,

--- 70 unchanged lines hidden ---

713 struct ksegrp *kg;
714
715 kg = td->td_ksegrp;
716 if (kg->kg_last_assigned == td)
717 kg->kg_last_assigned =
718 TAILQ_PREV(td, threadqueue, td_runq);
719 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
720 }
721
722 TD_SET_RUNNING(td);
723 CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
724 td->td_proc->p_pid, td->td_proc->p_comm);
725 mi_switch(SW_INVOL|SW_PREEMPT, td);
726 return (1);
727#else
728 return (0);
729#endif

--- 165 unchanged lines hidden ---

895
896 while (count-- && ke2) {
897 if (ke->ke_thread->td_lastcpu == cpu) {
898 ke = ke2;
899 break;
900 }
901 ke2 = TAILQ_NEXT(ke2, ke_procq);
902 }
903 } else
904#endif
905 ke = TAILQ_FIRST(rqh);
906 KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
907 CTR3(KTR_RUNQ,
908 "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
909 return (ke);
910 }
911 CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

--- 127 unchanged lines hidden ---