Deleted Added
sdiff udiff text old ( 166156 ) new ( 166190 )
full compact
1/*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 11 unchanged lines hidden (view full) ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 166156 2007-01-20 21:24:05Z jeff $");
29
30#include "opt_hwpmc_hooks.h"
31#include "opt_sched.h"
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kdb.h>
36#include <sys/kernel.h>

--- 18 unchanged lines hidden (view full) ---

55
56#ifdef HWPMC_HOOKS
57#include <sys/pmckern.h>
58#endif
59
60#include <machine/cpu.h>
61#include <machine/smp.h>
62
63/*
64 * TODO:
65 * Pick idle from affinity group or self group first.
66 * Implement pick_score.
67 */
68
69/*
70 * Thread scheduler specific section.
71 */
72struct td_sched {
73 TAILQ_ENTRY(td_sched) ts_procq; /* (j/z) Run queue. */
74 int ts_flags; /* (j) TSF_* flags. */
75 struct thread *ts_thread; /* (*) Active associated thread. */
76 u_char ts_rqindex; /* (j) Run queue index. */
77 enum {
78 TSS_THREAD,
79 TSS_ONRUNQ
80 } ts_state; /* (j) thread sched specific status. */
81 int ts_slptime;
82 int ts_slice;
83 struct runq *ts_runq;
84 u_char ts_cpu; /* CPU that we have affinity for. */
85 /* The following variables are only used for pctcpu calculation */
86 int ts_ltick; /* Last tick that we were running on */
87 int ts_ftick; /* First tick that we were running on */
88 int ts_ticks; /* Tick count */

--- 158 unchanged lines hidden (view full) ---

247#define TDQ_GROUP(x) (&tdq_groups[(x)])
248#else /* !SMP */
249static struct tdq tdq_cpu;
250
251#define TDQ_SELF() (&tdq_cpu)
252#define TDQ_CPU(x) (&tdq_cpu)
253#endif
254
255static struct td_sched *sched_choose(void); /* XXX Should be thread * */
256static void sched_priority(struct thread *);
257static void sched_thread_priority(struct thread *, u_char);
258static int sched_interact_score(struct thread *);
259static void sched_interact_update(struct thread *);
260static void sched_interact_fork(struct thread *);
261static void sched_pctcpu_update(struct td_sched *);
262static inline void sched_pin_td(struct thread *td);
263static inline void sched_unpin_td(struct thread *td);

--- 962 unchanged lines hidden (view full) ---

1226 /*
1227 * Set up the scheduler specific parts of proc0.
1228 */
1229 proc0.p_sched = NULL; /* XXX */
1230 thread0.td_sched = &td_sched0;
1231 td_sched0.ts_ltick = ticks;
1232 td_sched0.ts_ftick = ticks;
1233 td_sched0.ts_thread = &thread0;
1234 td_sched0.ts_state = TSS_THREAD;
1235}
1236
1237/*
1238 * This is only somewhat accurate since given many processes of the same
1239 * priority they will switch when their slices run out, which will be
1240 * at most sched_slice stathz ticks.
1241 */
1242int

--- 184 unchanged lines hidden (view full) ---

1427 tdq_load_rem(tdq, ts);
1428 if (TD_IS_RUNNING(td)) {
1429 /*
1430 * Don't allow the thread to migrate
1431 * from a preemption.
1432 */
1433 if (preempt)
1434 sched_pin_td(td);
1435 setrunqueue(td, preempt ?
1436 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1437 SRQ_OURSELF|SRQ_YIELDING);
1438 if (preempt)
1439 sched_unpin_td(td);
1440 }
1441 }
1442 if (newtd != NULL) {
1443 /*

--- 62 unchanged lines hidden (view full) ---

1506 int hzticks;
1507
1508 hzticks = (ticks - slptime) << SCHED_TICK_SHIFT;
1509 td->td_sched->skg_slptime += hzticks;
1510 sched_interact_update(td);
1511 sched_pctcpu_update(td->td_sched);
1512 sched_priority(td);
1513 }
1514 setrunqueue(td, SRQ_BORING);
1515}
1516
1517/*
1518 * Penalize the parent for creating a new child and initialize the child's
1519 * priority.
1520 */
1521void
1522sched_fork(struct thread *td, struct thread *child)

--- 49 unchanged lines hidden (view full) ---

1572 return;
1573
1574#ifdef SMP
1575 /*
1576 * On SMP if we're on the RUNQ we must adjust the transferable
1577 * count because could be changing to or from an interrupt
1578 * class.
1579 */
1580 if (td->td_sched->ts_state == TSS_ONRUNQ) {
1581 struct tdq *tdq;
1582
1583 tdq = TDQ_CPU(td->td_sched->ts_cpu);
1584 if (THREAD_CAN_MIGRATE(td)) {
1585 tdq->tdq_transferable--;
1586 tdq->tdq_group->tdg_transferable--;
1587 }
1588 td->td_pri_class = class;

--- 85 unchanged lines hidden (view full) ---

1674 * Advance the insert index once for each tick to ensure that all
1675 * threads get a chance to run.
1676 */
1677 if (tdq->tdq_idx == tdq->tdq_ridx) {
1678 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
1679 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
1680 tdq->tdq_ridx = tdq->tdq_idx;
1681 }
1682 /* Adjust ticks for pctcpu */
1683 ts = td->td_sched;
1684 ts->ts_ticks += tickincr;
1685 ts->ts_ltick = ticks;
1686 /*
1687 * Update if we've exceeded our desired tick threshhold by over one
1688 * second.
1689 */
1690 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
1691 sched_pctcpu_update(ts);
1692 /*
1693 * We only do slicing code for TIMESHARE threads.
1694 */
1695 if (td->td_pri_class != PRI_TIMESHARE)
1696 return;
1697 /*
1698 * We used a tick; charge it to the thread so that we can compute our
1699 * interactivity.
1700 */

--- 30 unchanged lines hidden (view full) ---

1731 } else
1732 if (tdq->tdq_load - 1 > 0)
1733 goto out;
1734 load = 0;
1735out:
1736 return (load);
1737}
1738
/*
 * Select the next td_sched to run on the current CPU.
 *
 * Must be called with sched_lock held (asserted below).  Picks the
 * highest-priority candidate from this CPU's tdq via tdq_choose().  On
 * SMP, if the only candidate is at idle priority (td_priority greater
 * than PRI_MIN_IDLE) we first try to steal work from another CPU with
 * tdq_idled(); a return of 0 means work may have arrived, so we restart
 * the selection.  The chosen thread is removed from the run queue and
 * its state switched to TSS_THREAD before being returned.  Returns NULL
 * when no runnable thread exists (and, on SMP, stealing found nothing).
 *
 * XXX per the prototype comment earlier in the file, this should
 * eventually return a struct thread * rather than a td_sched *.
 */
1739struct td_sched *
1740sched_choose(void)
1741{
1742 struct tdq *tdq;
1743 struct td_sched *ts;
1744
1745 mtx_assert(&sched_lock, MA_OWNED);
1746 tdq = TDQ_SELF();
1747#ifdef SMP
1748restart:
1749#endif
1750 ts = tdq_choose(tdq);
1751 if (ts) {
1752#ifdef SMP
	/* Only an idle-priority thread is available; try to steal a
	 * transferable thread from a busier CPU before settling for it. */
1753 if (ts->ts_thread->td_priority > PRI_MIN_IDLE)
1754 if (tdq_idled(tdq) == 0)
1755 goto restart;
1756#endif
	/* Dequeue the winner and mark it as no longer on a run queue. */
1757 tdq_runq_rem(tdq, ts);
1758 ts->ts_state = TSS_THREAD;
1759 return (ts);
1760 }
1761#ifdef SMP
	/* Queue empty: attempt work stealing; 0 means retry the pick. */
1762 if (tdq_idled(tdq) == 0)
1763 goto restart;
1764#endif
1765 return (NULL);
1766}
1767
1768void
1769sched_add(struct thread *td, int flags)
1770{
1771 struct tdq *tdq;
1772 struct td_sched *ts;
1773 int preemptive;
1774 int class;
1775#ifdef SMP
1776 int cpuid;
1777 int cpumask;
1778#endif
1779
1780 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1781 td, td->td_proc->p_comm, td->td_priority, curthread,
1782 curthread->td_proc->p_comm);
1783 mtx_assert(&sched_lock, MA_OWNED);
1784 tdq = TDQ_SELF();
1785 ts = td->td_sched;
1786 class = PRI_BASE(td->td_pri_class);
1787 preemptive = !(flags & SRQ_YIELDING);
1788 KASSERT(ts->ts_state != TSS_ONRUNQ,
1789 ("sched_add: thread %p (%s) already in run queue", td,
1790 td->td_proc->p_comm));
1791 KASSERT(td->td_proc->p_sflag & PS_INMEM,
1792 ("sched_add: process swapped out"));
1793 KASSERT(ts->ts_runq == NULL,
1794 ("sched_add: thread %p is still assigned to a run queue", td));
1795 /*
1796 * Recalculate the priority before we select the target cpu or
1797 * run-queue.
1798 */
1799 if (class == PRI_TIMESHARE)
1800 sched_priority(td);
1801#ifdef SMP
1802 cpuid = PCPU_GET(cpuid);
1803 /*
1804 * Pick the destination cpu and if it isn't ours transfer to the
1805 * target cpu.
1806 */
1807 if (THREAD_CAN_MIGRATE(td)) {
1808 if (td->td_priority <= PRI_MAX_ITHD) {

--- 25 unchanged lines hidden (view full) ---

1834 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
1835 /*
1836 * Now remove ourselves from the group specific idle mask.
1837 */
1838 tdq->tdq_group->tdg_idlemask &= ~cpumask;
1839 }
1840#endif
1841 /*
1842 * Set the slice and pick the run queue.
1843 */
1844 if (ts->ts_slice == 0)
1845 ts->ts_slice = sched_slice;
1846 if (td->td_priority <= PRI_MAX_REALTIME)
1847 ts->ts_runq = &tdq->tdq_realtime;
1848 else if (td->td_priority <= PRI_MAX_TIMESHARE)
1849 ts->ts_runq = &tdq->tdq_timeshare;
1850 else
1851 ts->ts_runq = &tdq->tdq_idle;
1852 if (preemptive && maybe_preempt(td))
1853 return;
1854 ts->ts_state = TSS_ONRUNQ;
1855
1856 tdq_runq_add(tdq, ts, flags);
1857 tdq_load_add(tdq, ts);
1858#ifdef SMP
1859 if (ts->ts_cpu != cpuid) {
1860 tdq_notify(ts);
1861 return;
1862 }
1863#endif

--- 7 unchanged lines hidden (view full) ---

1871 struct tdq *tdq;
1872 struct td_sched *ts;
1873
1874 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1875 td, td->td_proc->p_comm, td->td_priority, curthread,
1876 curthread->td_proc->p_comm);
1877 mtx_assert(&sched_lock, MA_OWNED);
1878 ts = td->td_sched;
1879 KASSERT((ts->ts_state == TSS_ONRUNQ),
1880 ("sched_rem: thread not on run queue"));
1881
1882 ts->ts_state = TSS_THREAD;
1883 tdq = TDQ_CPU(ts->ts_cpu);
1884 tdq_runq_rem(tdq, ts);
1885 tdq_load_rem(tdq, ts);
1886}
1887
1888fixpt_t
1889sched_pctcpu(struct thread *td)
1890{
1891 fixpt_t pctcpu;
1892 struct td_sched *ts;
1893

--- 27 unchanged lines hidden (view full) ---

1921 if (ts->ts_flags & TSF_BOUND)
1922 sched_unbind(td);
1923 ts->ts_flags |= TSF_BOUND;
1924#ifdef SMP
1925 sched_pin();
1926 if (PCPU_GET(cpuid) == cpu)
1927 return;
1928 ts->ts_cpu = cpu;
1929 ts->ts_state = TSS_THREAD;
1930 /* When we return from mi_switch we'll be on the correct cpu. */
1931 mi_switch(SW_VOL, NULL);
1932#endif
1933}
1934
1935void
1936sched_unbind(struct thread *td)
1937{

--- 52 unchanged lines hidden (view full) ---

1990sched_sizeof_thread(void)
1991{
1992 return (sizeof(struct thread) + sizeof(struct td_sched));
1993}
1994
/*
 * Per-tick hook from the common scheduler interface.  Intentionally a
 * no-op in ULE at this revision: per-tick accounting is handled in
 * sched_clock() instead (see the tdq_idx/tdq_ridx and ts_ticks updates
 * earlier in the file).
 */
1995void
1996sched_tick(void)
1997{
1998}
1999
/*
 * Sysctl knobs under kern.sched for the ULE scheduler.
 *
 *  name       (RD) scheduler identifier string, "ule".
 *  slice      (RW) time slice handed to a thread when it is queued with
 *                  no slice remaining (see ts_slice assignment in
 *                  sched_add above).
 *  interact   (RW) interactivity tunable — presumably the threshold used
 *                  by sched_interact_score(); the scoring code is not
 *                  visible here, so confirm against the full file.
 *  tickincr   (RD) amount added to ts_ticks per clock tick (used in the
 *                  pctcpu accounting in sched_clock above).
 *  realstathz (RD) read-only view of the effective stathz value.
 */
2000static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
2001SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
2002 "Scheduler name");
2003SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "");
2004SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, "");
2005SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, "");
2006SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, "");
2007#ifdef SMP

--- 23 unchanged lines hidden ---