sched_4bsd.c revision 212541
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 212541 2010-09-13 07:25:35Z mav $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <machine/pcb.h>
#include <machine/smp.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
int				dtrace_vtime_active;
dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;
#endif

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
#else
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#endif
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
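
/*
 * Illustrative arithmetic (a sketch, not from the original source, and
 * assuming the stock values PRIO_MIN == -20, PRIO_MAX == 20 and
 * RQ_PPQ == 4): on a UP kernel INVERSE_ESTCPU_WEIGHT is 8, so
 * ESTCPULIM() caps td_estcpu at 8 * (1 * 40 - 4) + 8 - 1 == 295.
 * Each INVERSE_ESTCPU_WEIGHT ticks of accumulated estcpu cost one
 * priority step, so this cap bounds how far a CPU hog's priority
 * can degrade.
 */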

#define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))

/*
 * The schedulable entity that runs a context.
 * This is an extension to the thread structure and is tailored to
 * the requirements of this scheduler.
 */
struct td_sched {
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	int		ts_slptime;	/* (j) Seconds !RUNNING. */
	int		ts_flags;
	struct runq	*ts_runq;	/* runq the thread is currently on */
#ifdef KTR
	char		ts_name[TS_NAME_LEN];
#endif
};

/* flags kept in td_flags */
#define	TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define	TDF_BOUND	TDF_SCHED1	/* Bound to one CPU. */

/* flags kept in ts_flags */
#define	TSF_AFFINITY	0x0001		/* Has a non-"full" CPU set. */

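/*
 * True when the thread has been assigned to one of the per-CPU run
 * queues rather than to the global run queue.
 */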
#define	SKE_RUNQ_PCPU(ts)						\
    ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)

#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

static struct td_sched td_sched0;
struct mtx sched_lock;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static void	setup_runqs(void);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);
static void	resetpriority(struct thread *td);
static void	resetpriority_thread(struct thread *td);
#ifdef SMP
static int	sched_pickcpu(struct thread *td);
static int	forward_wakeup(int cpunum);
static void	kick_other_cpu(int pri, int cpuid);
#endif

static struct kproc_desc sched_kp = {
	"schedcpu",
	schedcpu_thread,
	NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start,
    &sched_kp);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
long runq_length[MAXCPU];
#endif

struct pcpuidlestat {
	u_int idlecalls;
	u_int oldidlecalls;
};
static DPCPU_DEFINE(struct pcpuidlestat, idlestat);

static void
setup_runqs(void)
{
#ifdef SMP
	int i;

	for (i = 0; i < MAXCPU; ++i)
		runq_init(&runq_pcpu[i]);
#endif

	runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
    "Scheduler name");

SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
    "Roundrobin scheduling quantum in microseconds");
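
/*
 * Worked example (illustrative, not in the original source): with
 * hz == 1000, tick is 1000 microseconds and the default quantum is
 * SCHED_QUANTUM == hz / 10 == 100 ticks, which the sysctl above
 * reports as 100000 us.  "sysctl kern.sched.quantum=50000" would
 * request a 50 ms quantum; values smaller than one tick are rejected
 * with EINVAL.
 */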

#ifdef SMP
/* Enable forwarding of wakeups to all other cpus */
SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");

static int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");

static int forward_wakeup_enabled = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
	   &forward_wakeup_enabled, 0,
	   "Forwarding of wakeup to idle CPUs");

static int forward_wakeups_requested = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
	   &forward_wakeups_requested, 0,
	   "Requests for Forwarding of wakeup to idle CPUs");

static int forward_wakeups_delivered = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
	   &forward_wakeups_delivered, 0,
	   "Completed Forwarding of wakeup to idle CPUs");

static int forward_wakeup_use_mask = 1;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
	   &forward_wakeup_use_mask, 0,
	   "Use the mask of idle cpus");

static int forward_wakeup_use_loop = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
	   &forward_wakeup_use_loop, 0,
	   "Use a loop to find idle cpus");

static int forward_wakeup_use_single = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
	   &forward_wakeup_use_single, 0,
	   "Only signal one idle cpu");

static int forward_wakeup_use_htt = 0;
SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
	   &forward_wakeup_use_htt, 0,
	   "account for htt");

#endif
#if 0
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
	   &sched_followon, 0,
	   "allow threads to share a quantum");
#endif

static __inline void
sched_load_add(void)
{

	sched_tdcnt++;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}

static __inline void
sched_load_rem(void)
{

	sched_tdcnt--;
	KTR_COUNTER0(KTR_SCHED, "load", "global load", sched_tdcnt);
}

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the current thread should be preempted in favor of the
 * new thread.  If so, it switches to the new thread and eventually returns
 * true.  If not, it returns false so that the caller may place the thread
 * on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;

	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
			("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == td->td_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_name);
	mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}

/*
 * Constants for digital decay and forget:
 *	90% of (td_estcpu) usage in 5 * loadav time
 *	95% of (ts_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		td_estcpu *= decay;
 * will compute
 * 	td_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ -2.30 / (5 * loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
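
/*
 * Worked example (illustrative, not in the original source): with a
 * load average of 1.0, ldavg[0] == FSCALE, so loadfactor() yields
 * 2 * FSCALE and decay_cpu() scales by 2*FSCALE / (2*FSCALE + FSCALE)
 * == 2/3 per second.  After five seconds (2/3)^5 ~= 0.13, i.e. roughly
 * 90% of td_estcpu has been forgotten, matching the power table above.
 */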

/* decay 95% of `ts_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
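
/*
 * Sanity check (illustrative): ccpu/FSCALE == exp(-1/20) per second, so
 * over 60 seconds an idle thread's ts_pctcpu is scaled by
 * exp(-60/20) == exp(-3) ~= 0.05, the "decay 95% in 60 seconds"
 * behaviour promised above.
 */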

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct td_sched *ts;
	int awake, realstathz;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			awake = 0;
			thread_lock(td);
			ts = td->td_sched;
			/*
			 * Increment sleep time (if sleeping).  We
			 * ignore overflow, as above.
			 */
			/*
			 * The td_sched slptimes are not touched in wakeup
			 * because the thread may not HAVE everything in
			 * memory? XXX I think this is out of date.
			 */
			if (TD_ON_RUNQ(td)) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			} else if (TD_IS_RUNNING(td)) {
				awake = 1;
				/* Do not clear TDF_DIDRUN */
			} else if (td->td_flags & TDF_DIDRUN) {
				awake = 1;
				td->td_flags &= ~TDF_DIDRUN;
			}

			/*
			 * ts_pctcpu is only for ps and ttyinfo().
			 */
			ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
			/*
			 * If the td_sched has been idle the entire second,
			 * stop recalculating its priority until
			 * it wakes up.
			 */
			if (ts->ts_cpticks != 0) {
#if	(FSHIFT >= CCPU_SHIFT)
				ts->ts_pctcpu += (realstathz == 100)
				    ? ((fixpt_t) ts->ts_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ts->ts_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ts->ts_pctcpu += ((FSCALE - ccpu) *
				    (ts->ts_cpticks *
				    FSCALE / realstathz)) >> FSHIFT;
#endif
				ts->ts_cpticks = 0;
			}
			/*
			 * If there are ANY running threads in this process,
			 * then don't count it as sleeping.
			 * XXX: this is broken.
			 */
			if (awake) {
				if (ts->ts_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(td);
				}
				ts->ts_slptime = 0;
			} else
				ts->ts_slptime++;
			if (ts->ts_slptime > 1) {
				thread_unlock(td);
				continue;
			}
			td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
			resetpriority(td);
			resetpriority_thread(td);
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{

	for (;;) {
		schedcpu();
		pause("-", hz);
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max td_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay td_estcpu to zero.
 */
static void
updatepri(struct thread *td)
{
	struct td_sched *ts;
	fixpt_t loadfac;
	unsigned int newcpu;

	ts = td->td_sched;
	loadfac = loadfactor(averunnable.ldavg[0]);
	if (ts->ts_slptime > 5 * loadfac)
		td->td_estcpu = 0;
	else {
		newcpu = td->td_estcpu;
		ts->ts_slptime--;	/* was incremented in schedcpu() */
		while (newcpu && --ts->ts_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		td->td_estcpu = newcpu;
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct thread *td)
{
	register unsigned int newpriority;

	if (td->td_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		sched_user_prio(td, newpriority);
	}
}

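/*
 * Worked example (a sketch, not from the original source, assuming the
 * stock values PUSER == PRI_MIN_TIMESHARE == 160 and PRIO_MIN == -20):
 * a timeshare thread with td_estcpu == 80 and nice == 0 gets
 * 160 + 80 / 8 + 1 * (0 - (-20)) == 190, which falls within the
 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE] clamp applied above.
 */
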
/*
 * Update the thread's priority when the associated process's user
 * priority changes.
 */
static void
resetpriority_thread(struct thread *td)
{

	/* Only change threads with a time sharing user priority. */
	if (td->td_priority < PRI_MIN_TIMESHARE ||
	    td->td_priority > PRI_MAX_TIMESHARE)
		return;

	/* XXX the whole needresched thing is broken, but not silly. */
	maybe_resched(td);

	sched_prio(td, td->td_user_pri);
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	setup_runqs();

	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	/* Account for thread0. */
	sched_load_add();
}

/* External interfaces start here */

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	thread0.td_lock = &sched_lock;
	mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
	return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (td_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time td_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct pcpuidlestat *stat;
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;

	ts->ts_cpticks++;
	td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
	if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(td);
		resetpriority_thread(td);
	}

	/*
	 * Force a context switch if the current thread has used up a full
	 * quantum (default quantum is 100ms).
	 */
	if (!TD_IS_IDLETHREAD(td) &&
	    ticks - PCPU_GET(switchticks) >= sched_quantum)
		td->td_flags |= TDF_NEEDRESCHED;

	stat = DPCPU_PTR(idlestat);
	stat->oldidlecalls = stat->idlecalls;
	stat->idlecalls = 0;
}

/*
 * Charge child's scheduling CPU usage to parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
	    "prio:td", td->td_priority);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "exit",
	    "prio:td", child->td_priority);
	thread_lock(td);
	td->td_estcpu = ESTCPULIM(td->td_estcpu + child->td_estcpu);
	thread_unlock(td);
	thread_lock(child);
	if ((child->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();
	thread_unlock(child);
}

void
sched_fork(struct thread *td, struct thread *childtd)
{
	sched_fork_thread(td, childtd);
}

void
sched_fork_thread(struct thread *td, struct thread *childtd)
{
	struct td_sched *ts;

	childtd->td_estcpu = td->td_estcpu;
	childtd->td_lock = &sched_lock;
	childtd->td_cpuset = cpuset_ref(td->td_cpuset);
	ts = childtd->td_sched;
	bzero(ts, sizeof(*ts));
	ts->ts_flags |= (td->td_sched->ts_flags & TSF_AFFINITY);
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		resetpriority(td);
		resetpriority_thread(td);
		thread_unlock(td);
	}
}

void
sched_class(struct thread *td, int class)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 */
static void
sched_priority(struct thread *td, u_char prio)
{
	KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
	    "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	if (td != curthread && prio > td->td_priority) {
		KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
		    "lend prio", "prio:%d", td->td_priority, "new prio:%d",
		    prio, KTR_ATTR_LINKED, sched_tdname(td));
	}
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;
	td->td_priority = prio;
	if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't ever
	 * lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_flags |= TDF_UBORROWING;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else {
		sched_lend_user_prio(td, prio);
	}
}

void
sched_sleep(struct thread *td, int pri)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_slptick = ticks;
	td->td_sched->ts_slptime = 0;
	if (pri)
		sched_prio(td, pri);
	if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
		td->td_flags |= TDF_CANSWAP;
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct mtx *tmtx;
	struct td_sched *ts;
	struct proc *p;

	tmtx = NULL;
	ts = td->td_sched;
	p = td->td_proc;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * Switch to the sched lock to fix things up and pick
	 * a new thread.
	 * Block the td_lock in order to avoid breaking the critical path.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		tmtx = thread_lock_block(td);
	}

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_rem();

	if (newtd) {
		MPASS(newtd->td_lock == &sched_lock);
		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
	}

	td->td_lastcpu = td->td_oncpu;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	td->td_oncpu = NOCPU;

	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td->td_flags & TDF_IDLETD) {
		TD_SET_CAN_RUN(td);
#ifdef SMP
		idle_cpus_mask &= ~PCPU_GET(cpumask);
#endif
	} else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.
		 * It came from:
		 * * A preemption
		 * * An upcall
		 * * A followon
		 */
		KASSERT((newtd->td_inhibitors == 0),
			("trying to run inhibited thread"));
		newtd->td_flags |= TDF_DIDRUN;
		TD_SET_RUNNING(newtd);
		if ((newtd->td_flags & TDF_NOLOAD) == 0)
			sched_load_add();
	} else {
		newtd = choosethread();
		MPASS(newtd->td_lock == &sched_lock);
	}

	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		/* I feel sleepy */
		lock_profile_release_lock(&sched_lock.lock_object);
#ifdef KDTRACE_HOOKS
		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (dtrace_vtime_active)
			(*dtrace_vtime_switch_func)(newtd);
#endif

		cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
		lock_profile_obtain_lock_success(&sched_lock.lock_object,
		    0, 0, __FILE__, __LINE__);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
		 * but any amount of time may have passed. All our context
		 * will still be available as will local variables.
		 * PCPU values however may have changed as we may have
		 * changed CPU so don't trust cached values of them.
		 * New threads will go to fork_exit() instead of here
		 * so if you change things here you may need to change
		 * things there too.
		 *
		 * If the thread above was exiting it will never wake
		 * up again here, so either it has saved everything it
		 * needed to, or the thread_wait() or wait() will
		 * need to reap it.
		 */
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

#ifdef SMP
	if (td->td_flags & TDF_IDLETD)
		idle_cpus_mask |= PCPU_GET(cpumask);
#endif
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
	MPASS(td->td_lock == &sched_lock);
}

void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	td->td_flags &= ~TDF_CANSWAP;
	if (ts->ts_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptick = 0;
	ts->ts_slptime = 0;
	sched_add(td, SRQ_BORING);
}

#ifdef SMP
static int
forward_wakeup(int cpunum)
{
	struct pcpu *pc;
	cpumask_t dontuse, id, map, map2, map3, me;

	mtx_assert(&sched_lock, MA_OWNED);

	CTR0(KTR_RUNQ, "forward_wakeup()");

	if ((!forward_wakeup_enabled) ||
	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
		return (0);
	if (!smp_started || cold || panicstr)
		return (0);

	forward_wakeups_requested++;

	/*
	 * Check the idle mask we received against what we calculated
	 * before in the old version.
	 */
	me = PCPU_GET(cpumask);

	/* Don't bother if we should be doing it ourself. */
	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
		return (0);

	dontuse = me | stopped_cpus | hlt_cpus_mask;
	map3 = 0;
	if (forward_wakeup_use_loop) {
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((id & dontuse) == 0 &&
			    pc->pc_curthread == pc->pc_idlethread) {
				map3 |= id;
			}
		}
	}

	if (forward_wakeup_use_mask) {
		map = 0;
		map = idle_cpus_mask & ~dontuse;

		/* If they are both on, compare and use loop if different. */
		if (forward_wakeup_use_loop) {
			if (map != map3) {
				printf("map (%02X) != map3 (%02X)\n", map,
				    map3);
				map = map3;
			}
		}
	} else {
		map = map3;
	}

	/* If we only allow a specific CPU, then mask off all the others. */
	if (cpunum != NOCPU) {
		KASSERT((cpunum <= mp_maxcpus), ("forward_wakeup: bad cpunum."));
		map &= (1 << cpunum);
	} else {
		/* Try to choose an idle die. */
		if (forward_wakeup_use_htt) {
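			/*
			 * map & (map >> 1) keeps a bit only when both
			 * logical CPUs of an HTT pair are idle, and the
			 * 0x5555 mask then selects the even-numbered
			 * sibling (this assumes siblings are numbered
			 * in consecutive pairs).
			 */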
			map2 = (map & (map >> 1)) & 0x5555;
			if (map2) {
				map = map2;
			}
		}

		/* Set only one bit. */
		if (forward_wakeup_use_single) {
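			/*
			 * map & (~map + 1) is the two's-complement idiom
			 * that isolates the lowest set bit.
			 */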
			map = map & ((~map) + 1);
		}
	}
	if (map) {
		forward_wakeups_delivered++;
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			id = pc->pc_cpumask;
			if ((map & id) == 0)
				continue;
			if (cpu_idle_wakeup(pc->pc_cpuid))
				map &= ~id;
		}
		if (map)
			ipi_selected(map, IPI_AST);
		return (1);
	}
	if (cpunum == NOCPU)
		printf("forward_wakeup: Idle processor not found\n");
	return (0);
}

static void
kick_other_cpu(int pri, int cpuid)
{
	struct pcpu *pcpu;
	int cpri;

	pcpu = pcpu_find(cpuid);
	if (idle_cpus_mask & pcpu->pc_cpumask) {
		forward_wakeups_delivered++;
		if (!cpu_idle_wakeup(cpuid))
			ipi_cpu(cpuid, IPI_AST);
		return;
	}

	cpri = pcpu->pc_curthread->td_priority;
	if (pri >= cpri)
		return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
	if (pri <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
	{
		ipi_cpu(cpuid, IPI_PREEMPT);
		return;
	}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

	pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
	ipi_cpu(cpuid, IPI_AST);
	return;
}
#endif /* SMP */

#ifdef SMP
static int
sched_pickcpu(struct thread *td)
{
	int best, cpu;

	mtx_assert(&sched_lock, MA_OWNED);

	if (THREAD_CAN_SCHED(td, td->td_lastcpu))
		best = td->td_lastcpu;
	else
		best = NOCPU;
	CPU_FOREACH(cpu) {
		if (!THREAD_CAN_SCHED(td, cpu))
			continue;

		if (best == NOCPU)
			best = cpu;
		else if (runq_length[cpu] < runq_length[best])
			best = cpu;
	}
	KASSERT(best != NOCPU, ("no valid CPUs"));

	return (best);
}
#endif

void
sched_add(struct thread *td, int flags)
#ifdef SMP
{
	struct td_sched *ts;
	int forwarded = 0;
	int cpu;
	int single_cpu = 0;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));

	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));

	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);

	if (td->td_pinned != 0) {
		cpu = td->td_lastcpu;
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else if (td->td_flags & TDF_BOUND) {
		/* Find CPU from bound runq. */
		KASSERT(SKE_RUNQ_PCPU(ts),
		    ("sched_add: bound td_sched not on cpu runq"));
		cpu = ts->ts_runq - &runq_pcpu[0];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else if (ts->ts_flags & TSF_AFFINITY) {
		/* Find a valid CPU for our cpuset */
		cpu = sched_pickcpu(td);
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
		    cpu);
	} else {
		CTR2(KTR_RUNQ,
		    "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
		    td);
		cpu = NOCPU;
		ts->ts_runq = &runq;
	}

	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
		kick_other_cpu(td->td_priority, cpu);
	} else {
		if (!single_cpu) {
			cpumask_t me = PCPU_GET(cpumask);
			cpumask_t idle = idle_cpus_mask & me;

			if (!idle && ((flags & SRQ_INTR) == 0) &&
			    (idle_cpus_mask & ~(hlt_cpus_mask | me)))
				forwarded = forward_wakeup(cpu);
		}

		if (!forwarded) {
			if ((flags & SRQ_YIELDING) == 0 && maybe_preempt(td))
				return;
			else
				maybe_resched(td);
		}
	}

	if ((td->td_flags & TDF_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, td, flags);
	if (cpu != NOCPU)
		runq_length[cpu]++;
}
#else /* SMP */
{
	struct td_sched *ts;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));
	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
	    sched_tdname(curthread));
	KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
	    KTR_ATTR_LINKED, sched_tdname(td));

	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	if (td->td_lock != &sched_lock) {
		mtx_lock_spin(&sched_lock);
		thread_lock_set(td, &sched_lock);
	}
	TD_SET_RUNQ(td);
	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
	ts->ts_runq = &runq;

	/*
	 * If we are yielding (on the way out anyhow) or the thread
	 * being saved is US, then don't try to be smart about preemption
	 * or kicking off another CPU as it won't help and may hinder.
	 * In the YIELDING case, we are about to run whoever is being
	 * put in the queue anyhow, and in the OURSELF case, we are
	 * putting ourselves on the run queue which also only happens
	 * when we are about to yield.
	 */
1360180879Sjhb	if ((flags & SRQ_YIELDING) == 0) {
1361147182Sups		if (maybe_preempt(td))
1362147182Sups			return;
1363180879Sjhb	}
1364198854Sattilio	if ((td->td_flags & TDF_NOLOAD) == 0)
1365139317Sjeff		sched_load_add();
1366177435Sjeff	runq_add(ts->ts_runq, td, flags);
1367132118Sjhb	maybe_resched(td);
1368104964Sjeff}
1369147182Sups#endif /* SMP */
1370147182Sups
1371104964Sjeffvoid
1372121127Sjeffsched_rem(struct thread *td)
1373104964Sjeff{
1374164936Sjulian	struct td_sched *ts;
1375121127Sjeff
1376164936Sjulian	ts = td->td_sched;
1377172207Sjeff	KASSERT(td->td_flags & TDF_INMEM,
1378172207Sjeff	    ("sched_rem: thread swapped out"));
1379166188Sjeff	KASSERT(TD_ON_RUNQ(td),
1380164936Sjulian	    ("sched_rem: thread not on run queue"));
1381104964Sjeff	mtx_assert(&sched_lock, MA_OWNED);
1382187357Sjeff	KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
1383187357Sjeff	    "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1384187357Sjeff	    sched_tdname(curthread));
1385104964Sjeff
1386198854Sattilio	if ((td->td_flags & TDF_NOLOAD) == 0)
1387139317Sjeff		sched_load_rem();
1388180923Sjhb#ifdef SMP
1389180923Sjhb	if (ts->ts_runq != &runq)
1390180923Sjhb		runq_length[ts->ts_runq - runq_pcpu]--;
1391180923Sjhb#endif
1392177435Sjeff	runq_remove(ts->ts_runq, td);
1393166188Sjeff	TD_SET_CAN_RUN(td);
1394104964Sjeff}
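/*
 * Note: sched_rem() and sched_add() pair up as this scheduler's
 * migration idiom; sched_affinity() below moves a thread off a
 * no-longer-valid per-CPU run queue with exactly:
 *
 *	sched_rem(td);
 *	sched_add(td, SRQ_BORING);
 */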
1395104964Sjeff
1396135295Sjulian/*
1397180879Sjhb * Select the thread to run.  Note that running threads still consume
1398180879Sjhb * a slot.
1399135295Sjulian */
1400166188Sjeffstruct thread *
1401104964Sjeffsched_choose(void)
1402104964Sjeff{
1403177435Sjeff	struct thread *td;
1404124955Sjeff	struct runq *rq;
1405104964Sjeff
1406170293Sjeff	mtx_assert(&sched_lock, MA_OWNED);
1407124955Sjeff#ifdef SMP
1408177435Sjeff	struct thread *tdcpu;
1409124955Sjeff
1410124955Sjeff	rq = &runq;
1411177435Sjeff	td = runq_choose_fuzz(&runq, runq_fuzz);
1412177435Sjeff	tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1413104964Sjeff
1414180879Sjhb	if (td == NULL ||
1415180879Sjhb	    (tdcpu != NULL &&
1416177435Sjeff	     tdcpu->td_priority < td->td_priority)) {
1417177435Sjeff		CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1418124955Sjeff		     PCPU_GET(cpuid));
1419177435Sjeff		td = tdcpu;
1420124955Sjeff		rq = &runq_pcpu[PCPU_GET(cpuid)];
1421180879Sjhb	} else {
1422177435Sjeff		CTR1(KTR_RUNQ, "choosing td %p from main runq", td);
1423124955Sjeff	}
1424124955Sjeff
1425124955Sjeff#else
1426124955Sjeff	rq = &runq;
1427177435Sjeff	td = runq_choose(&runq);
1428124955Sjeff#endif
1429124955Sjeff
1430177435Sjeff	if (td) {
1431180923Sjhb#ifdef SMP
1432180923Sjhb		if (td == tdcpu)
1433180923Sjhb			runq_length[PCPU_GET(cpuid)]--;
1434180923Sjhb#endif
1435177435Sjeff		runq_remove(rq, td);
1436177435Sjeff		td->td_flags |= TDF_DIDRUN;
1437104964Sjeff
1438177435Sjeff		KASSERT(td->td_flags & TDF_INMEM,
1439172207Sjeff		    ("sched_choose: thread swapped out"));
1440177435Sjeff		return (td);
1441180879Sjhb	}
1442166188Sjeff	return (PCPU_GET(idlethread));
1443104964Sjeff}
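/*
 * Worked example for the SMP selection above: lower td_priority values
 * are better, so if the global queue head has priority 120 and this
 * CPU's private queue head has priority 100, the per-CPU thread is
 * chosen.  On a tie the global queue wins, since the per-CPU thread
 * must compare strictly lower to be picked.
 */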
1444104964Sjeff
1445104964Sjeffvoid
1446177004Sjeffsched_preempt(struct thread *td)
1447177004Sjeff{
1448177004Sjeff	thread_lock(td);
1449177004Sjeff	if (td->td_critnest > 1)
1450177004Sjeff		td->td_owepreempt = 1;
1451177004Sjeff	else
1452178272Sjeff		mi_switch(SW_INVOL | SW_PREEMPT | SWT_PREEMPT, NULL);
1453177004Sjeff	thread_unlock(td);
1454177004Sjeff}
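/*
 * Note: with the thread lock (a spin lock) held, td_critnest is at
 * least 1, so a value greater than 1 means an explicit critical
 * section is also active; setting td_owepreempt defers the switch
 * until the matching critical_exit() rather than switching here.
 */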
1455177004Sjeff
1456177004Sjeffvoid
1457104964Sjeffsched_userret(struct thread *td)
1458104964Sjeff{
1459104964Sjeff	/*
1460104964Sjeff	 * XXX we cheat slightly on the locking here to avoid locking in
1461104964Sjeff	 * the usual case.  Setting td_priority here is essentially an
1462104964Sjeff	 * incomplete workaround for not setting it properly elsewhere.
1463104964Sjeff	 * Now that some interrupt handlers are threads, not setting it
1464104964Sjeff	 * properly elsewhere can clobber it in the window between setting
1465104964Sjeff	 * it here and returning to user mode, so don't waste time setting
1466104964Sjeff	 * it perfectly here.
1467104964Sjeff	 */
1468139453Sjhb	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1469139453Sjhb	    ("thread with borrowed priority returning to userland"));
1470163709Sjb	if (td->td_priority != td->td_user_pri) {
1471170293Sjeff		thread_lock(td);
1472163709Sjb		td->td_priority = td->td_user_pri;
1473163709Sjb		td->td_base_pri = td->td_user_pri;
1474170293Sjeff		thread_unlock(td);
1475163709Sjb	}
1476104964Sjeff}
1477107126Sjeff
1478124955Sjeffvoid
1479124955Sjeffsched_bind(struct thread *td, int cpu)
1480124955Sjeff{
1481164936Sjulian	struct td_sched *ts;
1482124955Sjeff
1483208391Sjhb	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
1484208391Sjhb	KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
1485124955Sjeff
1486164936Sjulian	ts = td->td_sched;
1487124955Sjeff
1488177435Sjeff	td->td_flags |= TDF_BOUND;
1489124955Sjeff#ifdef SMP
1490164936Sjulian	ts->ts_runq = &runq_pcpu[cpu];
1491124955Sjeff	if (PCPU_GET(cpuid) == cpu)
1492124955Sjeff		return;
1493124955Sjeff
1494131473Sjhb	mi_switch(SW_VOL, NULL);
1495124955Sjeff#endif
1496124955Sjeff}
1497124955Sjeff
1498124955Sjeffvoid
1499124955Sjeffsched_unbind(struct thread *td)
1500124955Sjeff{
1501170293Sjeff	THREAD_LOCK_ASSERT(td, MA_OWNED);
1502208391Sjhb	KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
1503177435Sjeff	td->td_flags &= ~TDF_BOUND;
1504124955Sjeff}
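/*
 * Usage sketch (illustrative): binding is restricted to curthread and
 * needs the thread lock, per the assertions above.  Code that must run
 * on a particular CPU for a while would do roughly:
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);	(may context switch on SMP)
 *	thread_unlock(curthread);
 *	... CPU-local work ...
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */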
1505124955Sjeff
1506107126Sjeffint
1507145256Sjkoshysched_is_bound(struct thread *td)
1508145256Sjkoshy{
1509170293Sjeff	THREAD_LOCK_ASSERT(td, MA_OWNED);
1510177435Sjeff	return (td->td_flags & TDF_BOUND);
1511145256Sjkoshy}
1512145256Sjkoshy
1513159630Sdavidxuvoid
1514159630Sdavidxusched_relinquish(struct thread *td)
1515159630Sdavidxu{
1516170293Sjeff	thread_lock(td);
1517178272Sjeff	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
1518170293Sjeff	thread_unlock(td);
1519159630Sdavidxu}
1520159630Sdavidxu
1521145256Sjkoshyint
1522125288Sjeffsched_load(void)
1523125288Sjeff{
1524125288Sjeff	return (sched_tdcnt);
1525125288Sjeff}
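/*
 * Note: sched_tdcnt is maintained by the sched_load_add()/
 * sched_load_rem() calls above for every runnable thread not marked
 * TDF_NOLOAD; the count returned here is what the system load-average
 * computation is expected to sample.
 */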
1526125288Sjeff
1527125288Sjeffint
1528107126Sjeffsched_sizeof_proc(void)
1529107126Sjeff{
1530107126Sjeff	return (sizeof(struct proc));
1531107126Sjeff}
1532159630Sdavidxu
1533107126Sjeffint
1534107126Sjeffsched_sizeof_thread(void)
1535107126Sjeff{
1536164936Sjulian	return (sizeof(struct thread) + sizeof(struct td_sched));
1537107126Sjeff}
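/*
 * Note (assumption, based on the combined size reported above): the
 * per-thread scheduler state is co-allocated immediately after the
 * thread structure, so initialization can locate it as roughly:
 *
 *	td->td_sched = (struct td_sched *)(td + 1);
 */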
1538107137Sjeff
1539107137Sjefffixpt_t
1540121127Sjeffsched_pctcpu(struct thread *td)
1541107137Sjeff{
1542164936Sjulian	struct td_sched *ts;
1543121147Sjeff
1544208787Sjhb	THREAD_LOCK_ASSERT(td, MA_OWNED);
1545164936Sjulian	ts = td->td_sched;
1546164936Sjulian	return (ts->ts_pctcpu);
1547107137Sjeff}
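/*
 * Worked example: ts_pctcpu is a fixpt_t scaled by FSCALE (that is,
 * 1 << FSHIFT, from <sys/param.h>), so a consumer would convert it to
 * an integer percentage with something like:
 *
 *	pct = (sched_pctcpu(td) * 100) / FSCALE;
 */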
1548159570Sdavidxu
1549159570Sdavidxuvoid
1550212541Smavsched_tick(int cnt)
1551159570Sdavidxu{
1552159570Sdavidxu}
1553166188Sjeff
1554166188Sjeff/*
1555166188Sjeff * The actual idle process.
1556166188Sjeff */
1557166188Sjeffvoid
1558166188Sjeffsched_idletd(void *dummy)
1559166188Sjeff{
1560212455Smav	struct pcpuidlestat *stat;
1561166188Sjeff
1562212455Smav	stat = DPCPU_PTR(idlestat);
1563166188Sjeff	for (;;) {
1564166188Sjeff		mtx_assert(&Giant, MA_NOTOWNED);
1565166188Sjeff
1566212455Smav		while (sched_runnable() == 0) {
1567212455Smav			cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
1568212455Smav			stat->idlecalls++;
1569212455Smav		}
1570166188Sjeff
1571166188Sjeff		mtx_lock_spin(&sched_lock);
1572178272Sjeff		mi_switch(SW_VOL | SWT_IDLE, NULL);
1573166188Sjeff		mtx_unlock_spin(&sched_lock);
1574166188Sjeff	}
1575166188Sjeff}
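/*
 * Note: the argument to cpu_idle() above is a "busy" hint.  Once the
 * current and previous windows together exceed 64 idle entries, the
 * CPU is evidently waking often, so cpu_idle() is asked to prefer a
 * shallower, lower-latency idle state; the idlecalls/oldidlecalls
 * counters are assumed to be aged periodically elsewhere in this file.
 */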
1576166188Sjeff
1577170293Sjeff/*
1578170293Sjeff * A CPU is entering the scheduler for the first time or a thread is exiting.
1579170293Sjeff */
1580170293Sjeffvoid
1581170293Sjeffsched_throw(struct thread *td)
1582170293Sjeff{
1583170293Sjeff	/*
1584170293Sjeff	 * Correct spinlock nesting.  The idle thread context that we are
1585170293Sjeff	 * borrowing was created so that it would start out with a single
1586170293Sjeff	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
1587170293Sjeff	 * explicitly acquired locks in this function, the nesting count
1588170293Sjeff	 * is now 2 rather than 1.  Since we are nested, calling
1589170293Sjeff	 * spinlock_exit() will simply adjust the counts without allowing
1590170293Sjeff	 * spin-lock-using code to interrupt us.
1591170293Sjeff	 */
1592170293Sjeff	if (td == NULL) {
1593170293Sjeff		mtx_lock_spin(&sched_lock);
1594170293Sjeff		spinlock_exit();
1595170293Sjeff	} else {
1596174629Sjeff		lock_profile_release_lock(&sched_lock.lock_object);
1597170293Sjeff		MPASS(td->td_lock == &sched_lock);
1598170293Sjeff	}
1599170293Sjeff	mtx_assert(&sched_lock, MA_OWNED);
1600170293Sjeff	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
1601170293Sjeff	PCPU_SET(switchtime, cpu_ticks());
1602170293Sjeff	PCPU_SET(switchticks, ticks);
1603170293Sjeff	cpu_throw(td, choosethread());	/* doesn't return */
1604170293Sjeff}
1605170293Sjeff
1606170293Sjeffvoid
1607170600Sjeffsched_fork_exit(struct thread *td)
1608170293Sjeff{
1609170293Sjeff
1610170293Sjeff	/*
1611170293Sjeff	 * Finish setting up thread glue so that it begins execution in a
1612170293Sjeff	 * non-nested critical section with sched_lock held but not recursed.
1613170293Sjeff	 */
1614170600Sjeff	td->td_oncpu = PCPU_GET(cpuid);
1615170600Sjeff	sched_lock.mtx_lock = (uintptr_t)td;
1616174629Sjeff	lock_profile_obtain_lock_success(&sched_lock.lock_object,
1617174629Sjeff	    0, 0, __FILE__, __LINE__);
1618170600Sjeff	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
1619170293Sjeff}
1620170293Sjeff
1621187357Sjeffchar *
1622187357Sjeffsched_tdname(struct thread *td)
1623187357Sjeff{
1624187357Sjeff#ifdef KTR
1625187357Sjeff	struct td_sched *ts;
1626187357Sjeff
1627187357Sjeff	ts = td->td_sched;
1628187357Sjeff	if (ts->ts_name[0] == '\0')
1629187357Sjeff		snprintf(ts->ts_name, sizeof(ts->ts_name),
1630187357Sjeff		    "%s tid %d", td->td_name, td->td_tid);
1631187357Sjeff	return (ts->ts_name);
1632187357Sjeff#else
1633187357Sjeff	return (td->td_name);
1634187357Sjeff#endif
1635187357Sjeff}
1636187357Sjeff
1637176729Sjeffvoid
1638176729Sjeffsched_affinity(struct thread *td)
1639176729Sjeff{
1640180923Sjhb#ifdef SMP
1641180923Sjhb	struct td_sched *ts;
1642180923Sjhb	int cpu;
1643180923Sjhb
1644180923Sjhb	THREAD_LOCK_ASSERT(td, MA_OWNED);
1645180923Sjhb
1646180923Sjhb	/*
1647180923Sjhb	 * Set the TSF_AFFINITY flag if there is at least one CPU this
1648180923Sjhb	 * thread can't run on.
1649180923Sjhb	 */
1650180923Sjhb	ts = td->td_sched;
1651180923Sjhb	ts->ts_flags &= ~TSF_AFFINITY;
1652209059Sjhb	CPU_FOREACH(cpu) {
1653180923Sjhb		if (!THREAD_CAN_SCHED(td, cpu)) {
1654180923Sjhb			ts->ts_flags |= TSF_AFFINITY;
1655180923Sjhb			break;
1656180923Sjhb		}
1657180923Sjhb	}
1658180923Sjhb
1659180923Sjhb	/*
1660180923Sjhb	 * If this thread can run on all CPUs, nothing else to do.
1661180923Sjhb	 */
1662180923Sjhb	if (!(ts->ts_flags & TSF_AFFINITY))
1663180923Sjhb		return;
1664180923Sjhb
1665180923Sjhb	/* Pinned threads and bound threads should be left alone. */
1666180923Sjhb	if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
1667180923Sjhb		return;
1668180923Sjhb
1669180923Sjhb	switch (td->td_state) {
1670180923Sjhb	case TDS_RUNQ:
1671180923Sjhb		/*
1672180923Sjhb		 * If we are on a per-CPU runqueue that is in the set,
1673180923Sjhb		 * then nothing needs to be done.
1674180923Sjhb		 */
1675180923Sjhb		if (ts->ts_runq != &runq &&
1676180923Sjhb		    THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
1677180923Sjhb			return;
1678180923Sjhb
1679180923Sjhb		/* Put this thread on a valid per-CPU runqueue. */
1680180923Sjhb		sched_rem(td);
1681180923Sjhb		sched_add(td, SRQ_BORING);
1682180923Sjhb		break;
1683180923Sjhb	case TDS_RUNNING:
1684180923Sjhb		/*
1685180923Sjhb		 * See if our current CPU is in the set.  If not, force a
1686180923Sjhb		 * context switch.
1687180923Sjhb		 */
1688180923Sjhb		if (THREAD_CAN_SCHED(td, td->td_oncpu))
1689180923Sjhb			return;
1690180923Sjhb
1691180923Sjhb		td->td_flags |= TDF_NEEDRESCHED;
1692180923Sjhb		if (td != curthread)
1693210939Sjhb			ipi_cpu(td->td_oncpu, IPI_AST);
1694180923Sjhb		break;
1695180923Sjhb	default:
1696180923Sjhb		break;
1697180923Sjhb	}
1698180923Sjhb#endif
1699176729Sjeff}
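/*
 * Note: in the TDS_RUNNING case the IPI targets td->td_oncpu, the CPU
 * the thread currently occupies; after the CPU_FOREACH() loop, "cpu"
 * only names the first CPU the thread may not run on.  sched_affinity()
 * itself is presumed to be invoked by the cpuset machinery once
 * td_cpuset has been updated.
 */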
1700