kern_switch.c: diff between revision 125315 (deleted lines) and revision 131481 (added lines)
1/*
2 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 72 unchanged lines hidden ---

81
82The result of this scheme is that the M available KSEs are always
 83queued at the priorities they have inherited from the M highest priority
84threads for that KSEGROUP. If this situation changes, the KSEs are
85reassigned to keep this true.
86***/
87
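The header comment above describes an invariant rather than code: a KSEGROUP's M runnable KSEs are always queued at the priorities inherited from its M highest-priority threads, and they are re-queued whenever that set changes. A minimal, self-contained sketch of that idea follows; the toy_thread/toy_kse structures and toy_reassign() are illustrative assumptions only and do not exist in the kernel.

    struct toy_thread { int pri; };     /* lower value means higher priority */
    struct toy_kse { int pri; };

    /*
     * Re-queue the group's nkse KSEs at the priorities of the nkse
     * highest-priority runnable threads; threads[] is sorted best-first.
     */
    static void
    toy_reassign(struct toy_kse *kse, int nkse,
        struct toy_thread **threads, int nthread)
    {
            int i;

            for (i = 0; i < nkse && i < nthread; i++)
                    kse[i].pri = threads[i]->pri;
    }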
88#include <sys/cdefs.h>
89__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 125315 2004-02-02 08:13:27Z jeff $");
89__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 131481 2004-07-02 20:21:44Z jhb $");
90
91#include "opt_full_preemption.h"
92
93#include <sys/param.h>
94#include <sys/systm.h>
95#include <sys/kernel.h>
96#include <sys/ktr.h>
97#include <sys/lock.h>
98#include <sys/mutex.h>
99#include <sys/proc.h>
100#include <sys/queue.h>

--- 319 unchanged lines hidden ---

420 kg->kg_last_assigned = td2;
421 td2->td_kse = ke;
422 ke->ke_thread = td2;
423 }
424 sched_add(ke->ke_thread);
425 }
426}
427
426/************************************************************************
427 * Critical section marker functions *
428 ************************************************************************/
429/* Critical sections that prevent preemption. */
428/*
429 * Kernel thread preemption implementation. Critical sections mark
430 * regions of code in which preemptions are not allowed.
431 */
432void
433critical_enter(void)
434{
435 struct thread *td;
436
437 td = curthread;
438 if (td->td_critnest == 0)
439 cpu_critical_enter();

--- 4 unchanged lines hidden (view full) ---

444critical_exit(void)
445{
446 struct thread *td;
447
448 td = curthread;
449 KASSERT(td->td_critnest != 0,
450 ("critical_exit: td_critnest == 0"));
451 if (td->td_critnest == 1) {
452#ifdef PREEMPTION
453 if (td->td_flags & TDF_OWEPREEMPT) {
454 mtx_lock_spin(&sched_lock);
455 mi_switch(SW_INVOL, NULL);
456 mtx_unlock_spin(&sched_lock);
457 }
458#endif
459 td->td_critnest = 0;
460 cpu_critical_exit();
461 } else {
462 td->td_critnest--;
463 }
464}
465
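The hunk above is the heart of the change: critical_enter() and critical_exit() still bump td_critnest, but with the PREEMPTION option the outermost critical_exit() now performs any preemption that was deferred (TDF_OWEPREEMPT) while the section was held. A minimal usage sketch, assuming the same kernel headers this file already includes, is shown below; critical_enter() and critical_exit() are the real interface, while example_counter and example_update() are hypothetical names.

    static u_int example_counter;       /* hypothetical per-CPU style datum */

    static void
    example_update(void)
    {

            critical_enter();           /* preemption is held off from here on */
            example_counter++;          /* short, bounded work */
            critical_exit();            /* outermost exit runs any owed preemption */
    }

Sections nest through td_critnest, so example_update() may itself be called from code that is already inside a critical section; only the outermost critical_exit() re-enables preemption and performs the deferred switch.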
466/*
467 * This function is called when a thread is about to be put on run queue
468 * because it has been made runnable or its priority has been adjusted. It
469 * determines if the new thread should be immediately preempted to. If so,
470 * it switches to it and eventually returns true. If not, it returns false
471 * so that the caller may place the thread on an appropriate run queue.
472 */
473int
474maybe_preempt(struct thread *td)
475{
476 struct thread *ctd;
477 int cpri, pri;
457
478
479 mtx_assert(&sched_lock, MA_OWNED);
480#ifdef PREEMPTION
481 /*
482 * The new thread should not preempt the current thread if any of the
483 * following conditions are true:
484 *
485 * - The current thread has a higher (numerically lower) priority.
486 * - It is too early in the boot for context switches (cold is set).
487 * - The current thread has an inhibitor set or is in the process of
488 * exiting. In this case, the current thread is about to switch
489 * out anyways, so there's no point in preempting. If we did,
490 * the current thread would not be properly resumed as well, so
491 * just avoid that whole landmine.
492 * - If the new thread's priority is not a realtime priority and
493 * the current thread's priority is not an idle priority and
494 * FULL_PREEMPTION is disabled.
495 *
496 * If all of these conditions are false, but the current thread is in
497 * a nested critical section, then we have to defer the preemption
498 * until we exit the critical section. Otherwise, switch immediately
499 * to the new thread.
500 */
501 ctd = curthread;
502 pri = td->td_priority;
503 cpri = ctd->td_priority;
504 if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
505 td->td_kse->ke_state != KES_THREAD)
506 return (0);
507#ifndef FULL_PREEMPTION
508 if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
509 !(cpri >= PRI_MIN_IDLE))
510 return (0);
511#endif
512 if (ctd->td_critnest > 1) {
513 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
514 ctd->td_critnest);
515 ctd->td_flags |= TDF_OWEPREEMPT;
516 return (0);
517 }
518
519 /*
520 * Our thread state says that we are already on a run queue, so
521 * update our state as if we had been dequeued by choosethread().
522 */
523 MPASS(TD_ON_RUNQ(td));
524 TD_SET_RUNNING(td);
525 CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
526 td->td_proc->p_pid, td->td_proc->p_comm);
527 mi_switch(SW_INVOL, td);
528 return (1);
529#else
530 return (0);
531#endif
532}
533
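maybe_preempt() is written for callers that are about to put a newly runnable thread on a run queue, as the comment above says; a sketch of that calling pattern, again assuming this file's own headers, follows. Only maybe_preempt() is the real interface here; example_setrunnable() and example_enqueue() are hypothetical stand-ins for the scheduler's actual call site and queueing code.

    static void
    example_enqueue(struct thread *td)
    {

            /* Placeholder: the real scheduler inserts td on its run queue here. */
            (void)td;
    }

    static void
    example_setrunnable(struct thread *td)
    {

            /* Like the real call sites, this runs with sched_lock held. */
            if (maybe_preempt(td))
                    return;             /* we preempted to td; it must not be queued too */
            example_enqueue(td);        /* no preemption: queue td as before */
    }

When maybe_preempt() returns zero, either preemption is compiled out, td did not beat the current thread's priority, or the switch was deferred with TDF_OWEPREEMPT; in all of those cases td still has to be queued normally.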
534#ifndef PREEMPTION
535/* XXX: There should be a non-static version of this. */
536static void
537printf_caddr_t(void *data)
538{
539 printf("%s", (char *)data);
540}
541static char preempt_warning[] =
542 "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
543SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
544 preempt_warning)
545#endif
546
547/************************************************************************
548 * SYSTEM RUN QUEUE manipulations and tests *
549 ************************************************************************/
550/*
551 * Initialize a run structure.
552 */
553void
554runq_init(struct runq *rq)

--- 260 unchanged lines hidden ---