sched_ule.c: revision 166156 (deleted) -> revision 166190 (added)
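In outline, the hunks shown below make the following changes between the two revisions:

 - require options PREEMPTION at build time (new #error guard);
 - drop the scheduler-private ts_state field (TSS_THREAD/TSS_ONRUNQ) from struct td_sched and use the machine-independent thread-state macros (TD_ON_RUNQ(), TD_SET_RUNQ(), TD_SET_CAN_RUN()) instead;
 - change sched_choose() to return a struct thread *, handing back the per-CPU idle thread rather than NULL when nothing is runnable;
 - replace the visible setrunqueue() calls with direct sched_add() calls, and replace maybe_preempt() with a new ULE-local sched_preempt();
 - move the "Adjust ticks for pctcpu" accounting into sched_tick() and add sched_idletd(), the idle loop, which relies on preemption for idle interruption.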
1/*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 166156 2007-01-20 21:24:05Z jeff $");
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 166190 2007-01-23 08:50:34Z jeff $");
29
30#include "opt_hwpmc_hooks.h"
31#include "opt_sched.h"
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kdb.h>
36#include <sys/kernel.h>

--- 18 unchanged lines hidden ---

55
56#ifdef HWPMC_HOOKS
57#include <sys/pmckern.h>
58#endif
59
60#include <machine/cpu.h>
61#include <machine/smp.h>
62
29
30#include "opt_hwpmc_hooks.h"
31#include "opt_sched.h"
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kdb.h>
36#include <sys/kernel.h>

--- 18 unchanged lines hidden ---

55
56#ifdef HWPMC_HOOKS
57#include <sys/pmckern.h>
58#endif
59
60#include <machine/cpu.h>
61#include <machine/smp.h>
62
63#ifndef PREEMPTION
64#error "SCHED_ULE requires options PREEMPTION"
65#endif
66
63/*
64 * TODO:
65 * Pick idle from affinity group or self group first.
66 * Implement pick_score.
67 */
68
69/*
70 * Thread scheduler specific section.
71 */
72struct td_sched {
73 TAILQ_ENTRY(td_sched) ts_procq; /* (j/z) Run queue. */
74 int ts_flags; /* (j) TSF_* flags. */
75 struct thread *ts_thread; /* (*) Active associated thread. */
76 u_char ts_rqindex; /* (j) Run queue index. */
67/*
68 * TODO:
69 * Pick idle from affinity group or self group first.
70 * Implement pick_score.
71 */
72
73/*
74 * Thread scheduler specific section.
75 */
76struct td_sched {
77 TAILQ_ENTRY(td_sched) ts_procq; /* (j/z) Run queue. */
78 int ts_flags; /* (j) TSF_* flags. */
79 struct thread *ts_thread; /* (*) Active associated thread. */
80 u_char ts_rqindex; /* (j) Run queue index. */
77 enum {
78 TSS_THREAD,
79 TSS_ONRUNQ
80 } ts_state; /* (j) thread sched specific status. */
81 int ts_slptime;
82 int ts_slice;
83 struct runq *ts_runq;
84 u_char ts_cpu; /* CPU that we have affinity for. */
85 /* The following variables are only used for pctcpu calculation */
86 int ts_ltick; /* Last tick that we were running on */
87 int ts_ftick; /* First tick that we were running on */
88 int ts_ticks; /* Tick count */

--- 158 unchanged lines hidden ---

247#define TDQ_GROUP(x) (&tdq_groups[(x)])
248#else /* !SMP */
249static struct tdq tdq_cpu;
250
251#define TDQ_SELF() (&tdq_cpu)
252#define TDQ_CPU(x) (&tdq_cpu)
253#endif
254
81 int ts_slptime;
82 int ts_slice;
83 struct runq *ts_runq;
84 u_char ts_cpu; /* CPU that we have affinity for. */
85 /* The following variables are only used for pctcpu calculation */
86 int ts_ltick; /* Last tick that we were running on */
87 int ts_ftick; /* First tick that we were running on */
88 int ts_ticks; /* Tick count */

--- 158 unchanged lines hidden ---

247#define TDQ_GROUP(x) (&tdq_groups[(x)])
248#else /* !SMP */
249static struct tdq tdq_cpu;
250
251#define TDQ_SELF() (&tdq_cpu)
252#define TDQ_CPU(x) (&tdq_cpu)
253#endif
254
255static struct td_sched *sched_choose(void); /* XXX Should be thread * */
256static void sched_priority(struct thread *);
257static void sched_thread_priority(struct thread *, u_char);
258static int sched_interact_score(struct thread *);
259static void sched_interact_update(struct thread *);
260static void sched_interact_fork(struct thread *);
261static void sched_pctcpu_update(struct td_sched *);
262static inline void sched_pin_td(struct thread *td);
263static inline void sched_unpin_td(struct thread *td);

--- 962 unchanged lines hidden ---

1226 /*
1227 * Set up the scheduler specific parts of proc0.
1228 */
1229 proc0.p_sched = NULL; /* XXX */
1230 thread0.td_sched = &td_sched0;
1231 td_sched0.ts_ltick = ticks;
1232 td_sched0.ts_ftick = ticks;
1233 td_sched0.ts_thread = &thread0;
255static void sched_priority(struct thread *);
256static void sched_thread_priority(struct thread *, u_char);
257static int sched_interact_score(struct thread *);
258static void sched_interact_update(struct thread *);
259static void sched_interact_fork(struct thread *);
260static void sched_pctcpu_update(struct td_sched *);
261static inline void sched_pin_td(struct thread *td);
262static inline void sched_unpin_td(struct thread *td);

--- 962 unchanged lines hidden ---

1225 /*
1226 * Set up the scheduler specific parts of proc0.
1227 */
1228 proc0.p_sched = NULL; /* XXX */
1229 thread0.td_sched = &td_sched0;
1230 td_sched0.ts_ltick = ticks;
1231 td_sched0.ts_ftick = ticks;
1232 td_sched0.ts_thread = &thread0;
1234 td_sched0.ts_state = TSS_THREAD;
1235}
1236
1237/*
1238 * This is only somewhat accurate since given many processes of the same
1239 * priority they will switch when their slices run out, which will be
1240 * at most sched_slice stathz ticks.
1241 */
1242int

--- 184 unchanged lines hidden ---

1427 tdq_load_rem(tdq, ts);
1428 if (TD_IS_RUNNING(td)) {
1429 /*
1430 * Don't allow the thread to migrate
1431 * from a preemption.
1432 */
1433 if (preempt)
1434 sched_pin_td(td);
1233}
1234
1235/*
1236 * This is only somewhat accurate since given many processes of the same
1237 * priority they will switch when their slices run out, which will be
1238 * at most sched_slice stathz ticks.
1239 */
1240int

--- 184 unchanged lines hidden ---

1425 tdq_load_rem(tdq, ts);
1426 if (TD_IS_RUNNING(td)) {
1427 /*
1428 * Don't allow the thread to migrate
1429 * from a preemption.
1430 */
1431 if (preempt)
1432 sched_pin_td(td);
1435 setrunqueue(td, preempt ?
1433 sched_add(td, preempt ?
1436 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1437 SRQ_OURSELF|SRQ_YIELDING);
1438 if (preempt)
1439 sched_unpin_td(td);
1440 }
1441 }
1442 if (newtd != NULL) {
1443 /*

--- 62 unchanged lines hidden ---

1506 int hzticks;
1507
1508 hzticks = (ticks - slptime) << SCHED_TICK_SHIFT;
1509 td->td_sched->skg_slptime += hzticks;
1510 sched_interact_update(td);
1511 sched_pctcpu_update(td->td_sched);
1512 sched_priority(td);
1513 }
1434 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1435 SRQ_OURSELF|SRQ_YIELDING);
1436 if (preempt)
1437 sched_unpin_td(td);
1438 }
1439 }
1440 if (newtd != NULL) {
1441 /*

--- 62 unchanged lines hidden ---

1504 int hzticks;
1505
1506 hzticks = (ticks - slptime) << SCHED_TICK_SHIFT;
1507 td->td_sched->skg_slptime += hzticks;
1508 sched_interact_update(td);
1509 sched_pctcpu_update(td->td_sched);
1510 sched_priority(td);
1511 }
1514 setrunqueue(td, SRQ_BORING);
1512 sched_add(td, SRQ_BORING);
1515}
1516
1517/*
1518 * Penalize the parent for creating a new child and initialize the child's
1519 * priority.
1520 */
1521void
1522sched_fork(struct thread *td, struct thread *child)

--- 49 unchanged lines hidden ---

1572 return;
1573
1574#ifdef SMP
1575 /*
1576 * On SMP if we're on the RUNQ we must adjust the transferable
1577 * count because it could be changing to or from an interrupt
1578 * class.
1579 */
1513}
1514
1515/*
1516 * Penalize the parent for creating a new child and initialize the child's
1517 * priority.
1518 */
1519void
1520sched_fork(struct thread *td, struct thread *child)

--- 49 unchanged lines hidden ---

1570 return;
1571
1572#ifdef SMP
1573 /*
1574 * On SMP if we're on the RUNQ we must adjust the transferable
1575 * count because it could be changing to or from an interrupt
1576 * class.
1577 */
1580 if (td->td_sched->ts_state == TSS_ONRUNQ) {
1578 if (TD_ON_RUNQ(td)) {
1581 struct tdq *tdq;
1582
1583 tdq = TDQ_CPU(td->td_sched->ts_cpu);
1584 if (THREAD_CAN_MIGRATE(td)) {
1585 tdq->tdq_transferable--;
1586 tdq->tdq_group->tdg_transferable--;
1587 }
1588 td->td_pri_class = class;

--- 85 unchanged lines hidden ---

1674 * Advance the insert index once for each tick to ensure that all
1675 * threads get a chance to run.
1676 */
1677 if (tdq->tdq_idx == tdq->tdq_ridx) {
1678 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
1679 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
1680 tdq->tdq_ridx = tdq->tdq_idx;
1681 }
1579 struct tdq *tdq;
1580
1581 tdq = TDQ_CPU(td->td_sched->ts_cpu);
1582 if (THREAD_CAN_MIGRATE(td)) {
1583 tdq->tdq_transferable--;
1584 tdq->tdq_group->tdg_transferable--;
1585 }
1586 td->td_pri_class = class;

--- 85 unchanged lines hidden ---

1672 * Advance the insert index once for each tick to ensure that all
1673 * threads get a chance to run.
1674 */
1675 if (tdq->tdq_idx == tdq->tdq_ridx) {
1676 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
1677 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
1678 tdq->tdq_ridx = tdq->tdq_idx;
1679 }
1682 /* Adjust ticks for pctcpu */
1683 ts = td->td_sched;
1680 ts = td->td_sched;
1684 ts->ts_ticks += tickincr;
1685 ts->ts_ltick = ticks;
1686 /*
1681 /*
1687 * Update if we've exceeded our desired tick threshold by over one
1688 * second.
1689 */
1690 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
1691 sched_pctcpu_update(ts);
1692 /*
1693 * We only do slicing code for TIMESHARE threads.
1694 */
1695 if (td->td_pri_class != PRI_TIMESHARE)
1696 return;
1697 /*
1698 * We used a tick; charge it to the thread so that we can compute our
1699 * interactivity.
1700 */

--- 30 unchanged lines hidden ---

1731 } else
1732 if (tdq->tdq_load - 1 > 0)
1733 goto out;
1734 load = 0;
1735out:
1736 return (load);
1737}
1738
1682 * We only do slicing code for TIMESHARE threads.
1683 */
1684 if (td->td_pri_class != PRI_TIMESHARE)
1685 return;
1686 /*
1687 * We used a tick; charge it to the thread so that we can compute our
1688 * interactivity.
1689 */

--- 30 unchanged lines hidden ---

1720 } else
1721 if (tdq->tdq_load - 1 > 0)
1722 goto out;
1723 load = 0;
1724out:
1725 return (load);
1726}
1727
1739struct td_sched *
1728struct thread *
1740sched_choose(void)
1741{
1742 struct tdq *tdq;
1743 struct td_sched *ts;
1744
1745 mtx_assert(&sched_lock, MA_OWNED);
1746 tdq = TDQ_SELF();
1747#ifdef SMP
1748restart:
1749#endif
1750 ts = tdq_choose(tdq);
1751 if (ts) {
1752#ifdef SMP
1753 if (ts->ts_thread->td_priority > PRI_MIN_IDLE)
1754 if (tdq_idled(tdq) == 0)
1755 goto restart;
1756#endif
1757 tdq_runq_rem(tdq, ts);
1729sched_choose(void)
1730{
1731 struct tdq *tdq;
1732 struct td_sched *ts;
1733
1734 mtx_assert(&sched_lock, MA_OWNED);
1735 tdq = TDQ_SELF();
1736#ifdef SMP
1737restart:
1738#endif
1739 ts = tdq_choose(tdq);
1740 if (ts) {
1741#ifdef SMP
1742 if (ts->ts_thread->td_priority > PRI_MIN_IDLE)
1743 if (tdq_idled(tdq) == 0)
1744 goto restart;
1745#endif
1746 tdq_runq_rem(tdq, ts);
1758 ts->ts_state = TSS_THREAD;
1759 return (ts);
1747 return (ts->ts_thread);
1760 }
1761#ifdef SMP
1762 if (tdq_idled(tdq) == 0)
1763 goto restart;
1764#endif
1748 }
1749#ifdef SMP
1750 if (tdq_idled(tdq) == 0)
1751 goto restart;
1752#endif
1765 return (NULL);
1753 return (PCPU_GET(idlethread));
1766}
1767
1754}
1755
1756static int
1757sched_preempt(struct thread *td)
1758{
1759 struct thread *ctd;
1760 int cpri;
1761 int pri;
1762
1763 ctd = curthread;
1764 pri = td->td_priority;
1765 cpri = ctd->td_priority;
1766 if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
1767 return (0);
1768 /*
1769 * Always preempt IDLE threads. Otherwise only if the preempting
1770 * thread is an ithread.
1771 */
1772 if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
1773 return (0);
1774 if (ctd->td_critnest > 1) {
1775 CTR1(KTR_PROC, "sched_preempt: in critical section %d",
1776 ctd->td_critnest);
1777 ctd->td_owepreempt = 1;
1778 return (0);
1779 }
1780 /*
1781 * Thread is runnable but not yet put on system run queue.
1782 */
1783 MPASS(TD_ON_RUNQ(td));
1784 TD_SET_RUNNING(td);
1785 CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
1786 td->td_proc->p_pid, td->td_proc->p_comm);
1787 mi_switch(SW_INVOL|SW_PREEMPT, td);
1788 return (1);
1789}
1790
1768void
1769sched_add(struct thread *td, int flags)
1770{
1771 struct tdq *tdq;
1772 struct td_sched *ts;
1773 int preemptive;
1774 int class;
1775#ifdef SMP
1776 int cpuid;
1777 int cpumask;
1778#endif
1791void
1792sched_add(struct thread *td, int flags)
1793{
1794 struct tdq *tdq;
1795 struct td_sched *ts;
1796 int preemptive;
1797 int class;
1798#ifdef SMP
1799 int cpuid;
1800 int cpumask;
1801#endif
1802 ts = td->td_sched;
1779
1803
1804 mtx_assert(&sched_lock, MA_OWNED);
1780 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1781 td, td->td_proc->p_comm, td->td_priority, curthread,
1782 curthread->td_proc->p_comm);
1805 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1806 td, td->td_proc->p_comm, td->td_priority, curthread,
1807 curthread->td_proc->p_comm);
1783 mtx_assert(&sched_lock, MA_OWNED);
1784 tdq = TDQ_SELF();
1785 ts = td->td_sched;
1786 class = PRI_BASE(td->td_pri_class);
1787 preemptive = !(flags & SRQ_YIELDING);
1788 KASSERT(ts->ts_state != TSS_ONRUNQ,
1789 ("sched_add: thread %p (%s) already in run queue", td,
1790 td->td_proc->p_comm));
1808 KASSERT((td->td_inhibitors == 0),
1809 ("sched_add: trying to run inhibited thread"));
1810 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1811 ("sched_add: bad thread state"));
1791 KASSERT(td->td_proc->p_sflag & PS_INMEM,
1792 ("sched_add: process swapped out"));
1793 KASSERT(ts->ts_runq == NULL,
1794 ("sched_add: thread %p is still assigned to a run queue", td));
1812 KASSERT(td->td_proc->p_sflag & PS_INMEM,
1813 ("sched_add: process swapped out"));
1814 KASSERT(ts->ts_runq == NULL,
1815 ("sched_add: thread %p is still assigned to a run queue", td));
1816 TD_SET_RUNQ(td);
1817 tdq = TDQ_SELF();
1818 class = PRI_BASE(td->td_pri_class);
1819 preemptive = !(flags & SRQ_YIELDING);
1795 /*
1796 * Recalculate the priority before we select the target cpu or
1797 * run-queue.
1798 */
1799 if (class == PRI_TIMESHARE)
1800 sched_priority(td);
1820 /*
1821 * Recalculate the priority before we select the target cpu or
1822 * run-queue.
1823 */
1824 if (class == PRI_TIMESHARE)
1825 sched_priority(td);
1826 if (ts->ts_slice == 0)
1827 ts->ts_slice = sched_slice;
1801#ifdef SMP
1802 cpuid = PCPU_GET(cpuid);
1803 /*
1804 * Pick the destination cpu and if it isn't ours transfer to the
1805 * target cpu.
1806 */
1807 if (THREAD_CAN_MIGRATE(td)) {
1808 if (td->td_priority <= PRI_MAX_ITHD) {

--- 25 unchanged lines hidden (view full) ---

1834 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
1835 /*
1836 * Now remove ourselves from the group specific idle mask.
1837 */
1838 tdq->tdq_group->tdg_idlemask &= ~cpumask;
1839 }
1840#endif
1841 /*
1828#ifdef SMP
1829 cpuid = PCPU_GET(cpuid);
1830 /*
1831 * Pick the destination cpu and if it isn't ours transfer to the
1832 * target cpu.
1833 */
1834 if (THREAD_CAN_MIGRATE(td)) {
1835 if (td->td_priority <= PRI_MAX_ITHD) {

--- 25 unchanged lines hidden (view full) ---

1861 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
1862 /*
1863 * Now remove ourselves from the group specific idle mask.
1864 */
1865 tdq->tdq_group->tdg_idlemask &= ~cpumask;
1866 }
1867#endif
1868 /*
1842 * Set the slice and pick the run queue.
1869 * Pick the run queue based on priority.
1843 */
1870 */
1844 if (ts->ts_slice == 0)
1845 ts->ts_slice = sched_slice;
1846 if (td->td_priority <= PRI_MAX_REALTIME)
1847 ts->ts_runq = &tdq->tdq_realtime;
1848 else if (td->td_priority <= PRI_MAX_TIMESHARE)
1849 ts->ts_runq = &tdq->tdq_timeshare;
1850 else
1851 ts->ts_runq = &tdq->tdq_idle;
1871 if (td->td_priority <= PRI_MAX_REALTIME)
1872 ts->ts_runq = &tdq->tdq_realtime;
1873 else if (td->td_priority <= PRI_MAX_TIMESHARE)
1874 ts->ts_runq = &tdq->tdq_timeshare;
1875 else
1876 ts->ts_runq = &tdq->tdq_idle;
1852 if (preemptive && maybe_preempt(td))
1877 if (preemptive && sched_preempt(td))
1853 return;
1878 return;
1854 ts->ts_state = TSS_ONRUNQ;
1855
1856 tdq_runq_add(tdq, ts, flags);
1857 tdq_load_add(tdq, ts);
1858#ifdef SMP
1859 if (ts->ts_cpu != cpuid) {
1860 tdq_notify(ts);
1861 return;
1862 }
1863#endif

--- 7 unchanged lines hidden ---

1871 struct tdq *tdq;
1872 struct td_sched *ts;
1873
1874 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1875 td, td->td_proc->p_comm, td->td_priority, curthread,
1876 curthread->td_proc->p_comm);
1877 mtx_assert(&sched_lock, MA_OWNED);
1878 ts = td->td_sched;
1879 tdq_runq_add(tdq, ts, flags);
1880 tdq_load_add(tdq, ts);
1881#ifdef SMP
1882 if (ts->ts_cpu != cpuid) {
1883 tdq_notify(ts);
1884 return;
1885 }
1886#endif

--- 7 unchanged lines hidden ---

1894 struct tdq *tdq;
1895 struct td_sched *ts;
1896
1897 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1898 td, td->td_proc->p_comm, td->td_priority, curthread,
1899 curthread->td_proc->p_comm);
1900 mtx_assert(&sched_lock, MA_OWNED);
1901 ts = td->td_sched;
1879 KASSERT((ts->ts_state == TSS_ONRUNQ),
1902 KASSERT(TD_ON_RUNQ(td),
1880 ("sched_rem: thread not on run queue"));
1881
1903 ("sched_rem: thread not on run queue"));
1904
1882 ts->ts_state = TSS_THREAD;
1883 tdq = TDQ_CPU(ts->ts_cpu);
1884 tdq_runq_rem(tdq, ts);
1885 tdq_load_rem(tdq, ts);
1905 tdq = TDQ_CPU(ts->ts_cpu);
1906 tdq_runq_rem(tdq, ts);
1907 tdq_load_rem(tdq, ts);
1908 TD_SET_CAN_RUN(td);
1886}
1887
1888fixpt_t
1889sched_pctcpu(struct thread *td)
1890{
1891 fixpt_t pctcpu;
1892 struct td_sched *ts;
1893

--- 27 unchanged lines hidden ---

1921 if (ts->ts_flags & TSF_BOUND)
1922 sched_unbind(td);
1923 ts->ts_flags |= TSF_BOUND;
1924#ifdef SMP
1925 sched_pin();
1926 if (PCPU_GET(cpuid) == cpu)
1927 return;
1928 ts->ts_cpu = cpu;
1909}
1910
1911fixpt_t
1912sched_pctcpu(struct thread *td)
1913{
1914 fixpt_t pctcpu;
1915 struct td_sched *ts;
1916

--- 27 unchanged lines hidden (view full) ---

1944 if (ts->ts_flags & TSF_BOUND)
1945 sched_unbind(td);
1946 ts->ts_flags |= TSF_BOUND;
1947#ifdef SMP
1948 sched_pin();
1949 if (PCPU_GET(cpuid) == cpu)
1950 return;
1951 ts->ts_cpu = cpu;
1929 ts->ts_state = TSS_THREAD;
1930 /* When we return from mi_switch we'll be on the correct cpu. */
1931 mi_switch(SW_VOL, NULL);
1932#endif
1933}
1934
1935void
1936sched_unbind(struct thread *td)
1937{

--- 52 unchanged lines hidden ---

1990sched_sizeof_thread(void)
1991{
1992 return (sizeof(struct thread) + sizeof(struct td_sched));
1993}
1994
1995void
1996sched_tick(void)
1997{
1952 /* When we return from mi_switch we'll be on the correct cpu. */
1953 mi_switch(SW_VOL, NULL);
1954#endif
1955}
1956
1957void
1958sched_unbind(struct thread *td)
1959{

--- 52 unchanged lines hidden ---

2012sched_sizeof_thread(void)
2013{
2014 return (sizeof(struct thread) + sizeof(struct td_sched));
2015}
2016
2017void
2018sched_tick(void)
2019{
2020 struct td_sched *ts;
2021
2022 ts = curthread->td_sched;
2023 /* Adjust ticks for pctcpu */
2024 ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2025 ts->ts_ltick = ticks;
2026 /*
2027 * Update if we've exceeded our desired tick threshold by over one
2028 * second.
2029 */
2030 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2031 sched_pctcpu_update(ts);
1998}
1999
2032}
2033
2034/*
2035 * The actual idle process.
2036 */
2037void
2038sched_idletd(void *dummy)
2039{
2040 struct proc *p;
2041 struct thread *td;
2042
2043 td = curthread;
2044 p = td->td_proc;
2045 mtx_assert(&Giant, MA_NOTOWNED);
2046 /* ULE Relies on preemption for idle interruption. */
2047 for (;;)
2048 cpu_idle();
2049}
2050
2000static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
2001SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
2002 "Scheduler name");
2003SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "");
2004SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, "");
2005SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, "");
2006SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, "");
2007#ifdef SMP

--- 23 unchanged lines hidden ---
2051static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
2052SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
2053 "Scheduler name");
2054SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "");
2055SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, "");
2056SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, "");
2057SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, "");
2058#ifdef SMP

--- 23 unchanged lines hidden ---
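The sysctl declarations near the end export ULE's tuning knobs under the kern.sched tree. As a usage sketch (values are system-dependent and elided here), the scheduler name and slice length can be inspected from userland with sysctl(8):

	$ sysctl kern.sched.name kern.sched.slice
	kern.sched.name: ule
	kern.sched.slice: ...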