Diff of sys/kern/sched_ule.c: r166156 (2007-01-20) -> r166190 (2007-01-23), normal diff format.
(Header reconstructed from web-viewer residue "Deleted Added / full compact"; hunk content below is unmodified.)
28c28
< __FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 166156 2007-01-20 21:24:05Z jeff $");
---
> __FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 166190 2007-01-23 08:50:34Z jeff $");
62a63,66
> #ifndef PREEMPTION
> #error "SCHED_ULE requires options PREEMPTION"
> #endif
>
77,80d80
< enum {
< TSS_THREAD,
< TSS_ONRUNQ
< } ts_state; /* (j) thread sched specific status. */
255d254
< static struct td_sched *sched_choose(void); /* XXX Should be thread * */
1234d1232
< td_sched0.ts_state = TSS_THREAD;
1435c1433
< setrunqueue(td, preempt ?
---
> sched_add(td, preempt ?
1514c1512
< setrunqueue(td, SRQ_BORING);
---
> sched_add(td, SRQ_BORING);
1580c1578
< if (td->td_sched->ts_state == TSS_ONRUNQ) {
---
> if (TD_ON_RUNQ(td)) {
1682d1679
< /* Adjust ticks for pctcpu */
1684,1685d1680
< ts->ts_ticks += tickincr;
< ts->ts_ltick = ticks;
1687,1692d1681
< * Update if we've exceeded our desired tick threshhold by over one
< * second.
< */
< if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
< sched_pctcpu_update(ts);
< /*
1739c1728
< struct td_sched *
---
> struct thread *
1758,1759c1747
< ts->ts_state = TSS_THREAD;
< return (ts);
---
> return (ts->ts_thread);
1765c1753
< return (NULL);
---
> return (PCPU_GET(idlethread));
1767a1756,1790
> static int
> sched_preempt(struct thread *td)
> {
> struct thread *ctd;
> int cpri;
> int pri;
>
> ctd = curthread;
> pri = td->td_priority;
> cpri = ctd->td_priority;
> if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
> return (0);
> /*
> * Always preempt IDLE threads. Otherwise only if the preempting
> * thread is an ithread.
> */
> if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
> return (0);
> if (ctd->td_critnest > 1) {
> CTR1(KTR_PROC, "sched_preempt: in critical section %d",
> ctd->td_critnest);
> ctd->td_owepreempt = 1;
> return (0);
> }
> /*
> * Thread is runnable but not yet put on system run queue.
> */
> MPASS(TD_ON_RUNQ(td));
> TD_SET_RUNNING(td);
> CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
> td->td_proc->p_pid, td->td_proc->p_comm);
> mi_switch(SW_INVOL|SW_PREEMPT, td);
> return (1);
> }
>
1778a1802
> ts = td->td_sched;
1779a1804
> mtx_assert(&sched_lock, MA_OWNED);
1783,1790c1808,1811
< mtx_assert(&sched_lock, MA_OWNED);
< tdq = TDQ_SELF();
< ts = td->td_sched;
< class = PRI_BASE(td->td_pri_class);
< preemptive = !(flags & SRQ_YIELDING);
< KASSERT(ts->ts_state != TSS_ONRUNQ,
< ("sched_add: thread %p (%s) already in run queue", td,
< td->td_proc->p_comm));
---
> KASSERT((td->td_inhibitors == 0),
> ("sched_add: trying to run inhibited thread"));
> KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
> ("sched_add: bad thread state"));
1794a1816,1819
> TD_SET_RUNQ(td);
> tdq = TDQ_SELF();
> class = PRI_BASE(td->td_pri_class);
> preemptive = !(flags & SRQ_YIELDING);
1800a1826,1827
> if (ts->ts_slice == 0)
> ts->ts_slice = sched_slice;
1842c1869
< * Set the slice and pick the run queue.
---
> * Pick the run queue based on priority.
1844,1845d1870
< if (ts->ts_slice == 0)
< ts->ts_slice = sched_slice;
1852c1877
< if (preemptive && maybe_preempt(td))
---
> if (preemptive && sched_preempt(td))
1854,1855d1878
< ts->ts_state = TSS_ONRUNQ;
<
1879c1902
< KASSERT((ts->ts_state == TSS_ONRUNQ),
---
> KASSERT(TD_ON_RUNQ(td),
1882d1904
< ts->ts_state = TSS_THREAD;
1885a1908
> TD_SET_CAN_RUN(td);
1929d1951
< ts->ts_state = TSS_THREAD;
1997a2020,2031
> struct td_sched *ts;
>
> ts = curthread->td_sched;
> /* Adjust ticks for pctcpu */
> ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
> ts->ts_ltick = ticks;
> /*
> * Update if we've exceeded our desired tick threshhold by over one
> * second.
> */
> if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
> sched_pctcpu_update(ts);
1999a2034,2050
> /*
> * The actual idle process.
> */
> void
> sched_idletd(void *dummy)
> {
> struct proc *p;
> struct thread *td;
>
> td = curthread;
> p = td->td_proc;
> mtx_assert(&Giant, MA_NOTOWNED);
> /* ULE Relies on preemption for idle interruption. */
> for (;;)
> cpu_idle();
> }
>