/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 130551 2004-06-16 00:26:31Z julian $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu
#define	ke_assign	ke_procq.tqe_next

#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
#define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */

struct kg_sched {
	int	skg_slptime;	/* Number of ticks we voluntarily slept */
	int	skg_runtime;	/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
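/*
 * Worked example (assuming the stock 5.x timeshare range, with
 * PRI_MIN_TIMESHARE == 160 and PRI_MAX_TIMESHARE == 223, so
 * SCHED_PRI_RANGE == 64): an interactivity score of 30 maps to
 * 30 * 64 / 100 = 19 priority points above SCHED_PRI_BASE, before the
 * nice value is added in sched_priority().
 */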

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_max)
#define	SCHED_SLICE_NTHRESH		(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
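/*
 * Worked example (assuming hz == 1000, so sched_setup() picks
 * slice_min = 10 and slice_max = 142, giving SCHED_SLICE_RANGE = 133
 * and SCHED_SLICE_NTHRESH = 19): a kse whose nice value equals
 * ksq_nicemin (distance 0) gets SCHED_SLICE_NICE(0) = 142 ticks, one at
 * distance 9 gets 142 - (9 * 133) / 19 = 79 ticks, and one at the
 * threshold distance of 19 gets 142 - 133 = 9 ticks.  Anything nicer
 * than that is handled specially in sched_slice().
 */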

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int			ksq_transferable;
	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
	struct kseq_group	*ksq_group;	/* Our processor group. */
	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
#else
	int		ksq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not
 * NUMA.  In a NUMA environment we'd want an idle bitmap per group and a two
 * tiered load balancer.
 */
struct kseq_group {
	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
	int	ksg_load;		/* Total load of this group. */
	int	ksg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
};
#endif

/*
 * One kse queue per processor.
 */
#ifdef SMP
static cpumask_t kseq_idle;
static int ksg_maxid;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq_group kseq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;

#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#define	KSEQ_ID(x)	((x) - kseq_cpu)
#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
#else	/* !SMP */
static struct kseq	kseq_cpu;

#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
static void sched_interact_fork(struct ksegrp *kg);
static void sched_pctcpu_update(struct kse *ke);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_load_add(struct kseq *kseq, struct kse *ke);
static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
static struct kse *runq_steal(struct runq *rq);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct kseq_group *ksg);
static void sched_balance_pair(struct kseq *high, struct kseq *low);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_idled(struct kseq *kseq);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
/*
 * On P4 Xeons the round-robin interrupt delivery is broken.  As a result
 * of this we can't pin ithreads to the cpu that their interrupt was
 * delivered to, otherwise they would all end up running only on CPU 0.
 * The i386 version of this macro therefore lets ithreads migrate.
 */
#ifdef __i386__
#define	KSE_CAN_MIGRATE(ke, class)					\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#else /* !__i386__ */
#define	KSE_CAN_MIGRATE(ke, class)					\
    ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
    ((ke)->ke_flags & KEF_BOUND) == 0)
#endif /* !__i386__ */
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", kseq->ksq_transferable);
#endif
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

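/*
 * Add a kse to the run queue it has already been assigned to, keeping the
 * per-kseq and per-group counts of migratable kses in sync so that the
 * load balancer knows how much work it may move.
 */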
static __inline void
kseq_runq_add(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
		kseq->ksq_transferable++;
		kseq->ksq_group->ksg_transferable++;
	}
#endif
	runq_add(ke->ke_runq, ke);
}

static __inline void
kseq_runq_rem(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
		kseq->ksq_transferable--;
		kseq->ksq_group->ksg_transferable--;
	}
#endif
	runq_remove(ke->ke_runq, ke);
}

static void
kseq_load_add(struct kseq *kseq, struct kse *ke)
{
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
	kseq->ksq_load++;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load++;
#else
		kseq->ksq_sysload++;
#endif
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_proc->p_nice);
}

static void
kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load--;
#else
		kseq->ksq_sysload--;
#endif
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
}

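/*
 * Track how many timeshare kses sit in each nice bucket on this kseq and
 * keep ksq_nicemin, the least (most favorable) nice value present, up to
 * date.  The bucket index is the nice value offset to be non-negative:
 * with SCHED_PRI_NHALF == 20, nice -20 maps to bucket 0 and nice +20 to
 * bucket 40.
 */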
static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much
 * at once can have an unpleasant effect on the system.  The scheduler
 * rarely has enough information to make perfect decisions, so this
 * algorithm favors simplicity and more gradual effects on load in larger
 * systems.
 *
 * It could be improved by considering the priorities and slices assigned
 * to each task prior to balancing them.  There are many pathological cases
 * with any approach and so the semi-random algorithm below may work as
 * well as any.
 */
static void
sched_balance(void)
{
	struct kseq_group *high;
	struct kseq_group *low;
	struct kseq_group *ksg;
	int cnt;
	int i;

	if (smp_started == 0)
		goto out;
	low = high = NULL;
	i = random() % (ksg_maxid + 1);
	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
		ksg = KSEQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || ksg->ksg_load > high->ksg_load)
		    && ksg->ksg_transferable)
			high = ksg;
		if (low == NULL || ksg->ksg_load < low->ksg_load)
			low = ksg;
		if (++i > ksg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->ksg_members),
		    LIST_FIRST(&low->ksg_members));
out:
	bal_tick = ticks + (random() % (hz * 2));
}

static void
sched_balance_groups(void)
{
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= ksg_maxid; i++)
			sched_balance_group(KSEQ_GROUP(i));
	gbal_tick = ticks + (random() % (hz * 2));
}

static void
sched_balance_group(struct kseq_group *ksg)
{
	struct kseq *kseq;
	struct kseq *high;
	struct kseq *low;
	int load;

	if (ksg->ksg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
		load = kseq->ksq_load;
		if (high == NULL || load > high->ksq_load)
			high = kseq;
		if (low == NULL || load < low->ksq_load)
			low = kseq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * kseq's transferable count, otherwise we can steal from other
	 * members of the group.
	 */
	if (high->ksq_group == low->ksq_group) {
		transferable = high->ksq_transferable;
		high_load = high->ksq_load;
		low_load = low->ksq_load;
	} else {
		transferable = high->ksq_group->ksg_transferable;
		high_load = high->ksq_group->ksg_load;
		low_load = low->ksq_group->ksg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * kses we actually have to give up (transferable).
	 */
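	/*
	 * For example, a high load of 5 against a low load of 2 gives
	 * diff = 3 and move = 2; rounding the difference up lets the pair
	 * converge even when the imbalance is odd.
	 */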
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		kseq_move(high, KSEQ_ID(low));
	return;
}

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kseq *kseq;
	struct kseq *to;
	struct kse *ke;

	kseq = from;
	to = KSEQ_CPU(cpu);
	ke = kseq_steal(kseq, 1);
	if (ke == NULL) {
		struct kseq_group *ksg;

		ksg = kseq->ksq_group;
		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
			if (kseq == from || kseq->ksq_transferable == 0)
				continue;
			ke = kseq_steal(kseq, 1);
			break;
		}
		if (ke == NULL)
			panic("kseq_move: No KSEs available with a "
			    "transferable count of %d\n",
			    ksg->ksg_transferable);
	}
	if (kseq == to)
		return;
	ke->ke_state = KES_THREAD;
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
	kseq_notify(ke, cpu);
}

static int
kseq_idled(struct kseq *kseq)
{
	struct kseq_group *ksg;
	struct kseq *steal;
	struct kse *ke;

	ksg = kseq->ksq_group;
	/*
	 * If we're in a cpu group, try to steal kses from another cpu in
	 * the group before idling.
	 */
	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
			if (steal == kseq || steal->ksq_transferable == 0)
				continue;
			ke = kseq_steal(steal, 0);
			if (ke == NULL)
				continue;
			ke->ke_state = KES_THREAD;
			kseq_runq_rem(steal, ke);
			kseq_load_rem(steal, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			sched_add(ke->ke_thread);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a KSE bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	ksg->ksg_idlemask |= PCPU_GET(cpumask);
	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
		return (1);
	atomic_set_int(&kseq_idle, ksg->ksg_mask);
	return (1);
}

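/*
 * Drain the list of kses that other cpus have assigned to this kseq and
 * add each one to the local run queue.  The list head is claimed with an
 * atomic compare-and-set so remote cpus may keep prepending concurrently.
 */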
static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		ke = (struct kse *)kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		ke->ke_flags &= ~KEF_ASSIGNED;
		sched_add(ke->ke_thread);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;

	ke->ke_cpu = cpu;
	ke->ke_flags |= KEF_ASSIGNED;

	kseq = KSEQ_CPU(cpu);

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		ke->ke_assign = (struct kse *)kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

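/*
 * Scan a run queue from highest priority to lowest and return the first
 * kse that is permitted to migrate, without dequeueing it.
 */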
static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke,
				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq, int stealidle)
{
	struct kse *ke;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if (stealidle)
		return (runq_steal(&kseq->ksq_idle));
	return (NULL);
}

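/*
 * Try to hand a newly runnable kse off to an idle cpu, preferring idle
 * groups over idle siblings in our own group.  Returns 1 if the kse was
 * passed to another cpu via kseq_notify() and 0 if the caller should
 * queue it locally.
 */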
int
kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
{
	struct kseq_group *ksg;
	int cpu;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	ksg = kseq->ksq_group;

	/*
	 * If there are any idle groups, give them our extra load.  The
	 * threshold at which we start to reassign kses has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	if (ksg->ksg_load > (ksg->ksg_cpus * 2) && kseq_idle) {
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu)
			atomic_clear_int(&kseq_idle, 1 << (cpu - 1));
	}
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to it after checking to see if there are idled groups.
	 */
	if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) {
		cpu = ffs(ksg->ksg_idlemask);
		if (cpu)
			ksg->ksg_idlemask &= ~(1 << (cpu - 1));
	}
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	if (cpu) {
		cpu--;
		ke->ke_runq = NULL;
		kseq_notify(ke, cpu);
		return (1);
	}
	return (0);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */
static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int balance_groups;
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the kseqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct kseq *ksq;

		ksq = &kseq_cpu[i];
		ksq->ksq_assigned = NULL;
		kseq_setup(&kseq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct kseq_group *ksg;
		struct kseq *ksq;

		for (i = 0; i < MAXCPU; i++) {
			ksq = &kseq_cpu[i];
			ksg = &kseq_groups[i];
			/*
			 * Setup a kseq group with one member.
			 */
			ksq->ksq_transferable = 0;
			ksq->ksq_group = ksg;
			ksg->ksg_cpus = 1;
			ksg->ksg_idlemask = 0;
			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			LIST_INIT(&ksg->ksg_members);
			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
		}
	} else {
		struct kseq_group *ksg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			ksg = &kseq_groups[i];
			/*
			 * Initialize the group.
			 */
			ksg->ksg_idlemask = 0;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			ksg->ksg_cpus = cg->cg_count;
			ksg->ksg_cpumask = cg->cg_mask;
			LIST_INIT(&ksg->ksg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (ksg->ksg_mask == 0)
						ksg->ksg_mask = 1 << j;
					kseq_cpu[j].ksq_transferable = 0;
					kseq_cpu[j].ksq_group = ksg;
					LIST_INSERT_HEAD(&ksg->ksg_members,
					    &kseq_cpu[j], ksq_siblings);
				}
			}
			if (ksg->ksg_cpus > 1)
				balance_groups = 1;
		}
		ksg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_load_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_proc->p_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if one abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_proc->p_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_INTERACTIVE;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
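 *
 * For example, with hz == 1000, SCHED_SLP_RUN_MAX is 5000 << 10, or
 * 5120000.  A sum of 5500000 is below the 6/5ths cutoff of 6144000, so
 * both terms are scaled by 4/5ths; a sum of 7000000 exceeds the cutoff
 * and both terms are simply halved instead.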
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

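/*
 * Compute the interactivity score on a 0..SCHED_INTERACT_MAX scale, where
 * smaller is more interactive: a kseg that sleeps more than it runs lands
 * in [0, 50) and one that runs more than it sleeps lands in (50, 100].
 * Worked example with SCHED_INTERACT_HALF == 50: runtime = 200000 and
 * slptime = 50000 gives div = 200000 / 50 = 4000 and a score of
 * 50 + (50 - 50000 / 4000) = 88 (CPU bound); swapping the two figures
 * gives 50000 / 4000 = 12, comfortably under SCHED_INTERACT_THRESH (30).
 */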
static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);
}

/*
 * This is only somewhat accurate since, given many processes of the same
 * priority, they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke);
		}
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td)
{
	struct thread *newtd;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	/*
	 * If the KSE has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
		if (TD_IS_RUNNING(td)) {
			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
			setrunqueue(td);
		} else {
			if (ke->ke_runq) {
				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
			} else if ((td->td_flags & TDF_IDLETD) == 0)
				backtrace();
			/*
			 * We will not be on the run queue.  So we must be
			 * sleeping or similar.
			 */
			if (td->td_proc->p_flag & P_SA)
				kse_reassign(ke);
		}
	}
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_pri_class == PRI_TIMESHARE) {
			FOREACH_KSE_IN_GROUP(kg, ke) {
				if (ke->ke_runq == NULL)
					continue;
				kseq = KSEQ_CPU(ke->ke_cpu);
				kseq_nice_rem(kseq, p->p_nice);
				kseq_nice_add(kseq, nice);
			}
		}
	}
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_priority(kg);
		FOREACH_THREAD_IN_GROUP(kg, td)
			td->td_flags |= TDF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_base_pri = td->td_priority;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	p1->p_nice = p->p_nice;
	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu;
	child->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	child->ke_ticks = ke->ke_ticks;
	child->ke_ltick = ke->ke_ltick;
	child->ke_ftick = ke->ke_ftick;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because we could be changing to or from an interrupt
		 * class.
		 */
		if (ke->ke_state == KES_ONRUNQ) {
			if (KSE_CAN_MIGRATE(ke, oclass)) {
				kseq->ksq_transferable--;
				kseq->ksq_group->ksg_transferable--;
			}
			if (KSE_CAN_MIGRATE(ke, nclass)) {
				kseq->ksq_transferable++;
				kseq->ksq_group->ksg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare--;
			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare++;
			kseq_nice_add(kseq, kg->kg_proc->p_nice);
		}
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_load_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* kg->kg_slptime += child->kg_slptime; */
	kg->kg_runtime += child->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
	if (ticks == bal_tick)
		sched_balance();
	if (ticks == gbal_tick)
		sched_balance_groups();
#endif
	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute
	 * our interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	if (--ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq = KSEQ_SELF();
	kseq_load_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_load_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned) {
		mtx_lock_spin(&sched_lock);
		kseq_assign(kseq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
restart:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_idled(kseq) == 0)
				goto restart;
#endif
		kseq_runq_rem(kseq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}
#ifdef SMP
	if (kseq_idled(kseq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	kseq = KSEQ_SELF();
	KASSERT((ke->ke_thread != NULL),
	    ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	class = PRI_BASE(kg->kg_pri_class);
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority propagation.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	if (ke->ke_cpu != PCPU_GET(cpuid)) {
		ke->ke_runq = NULL;
		kseq_notify(ke, ke->ke_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (kseq->ksq_group->ksg_idlemask ==
		    kseq->ksq_group->ksg_cpumask)
			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
	} else if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class))
		if (kseq_transfer(kseq, ke, class))
			return;
#endif
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	kseq_runq_add(kseq, ke);
	kseq_load_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;
	/*
	 * It is safe to just return here because sched_rem() is only ever
	 * used in places where we're immediately going to add the
	 * kse back on again.  In that case it'll be added with the correct
	 * thread and priority when the caller drops the sched_lock.
	 */
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
		    ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rticks per second? */
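		/*
		 * Sanity check on the scaling below: a kse that ran for the
		 * whole averaging window accumulates roughly
		 * SCHED_CPU_TIME * realstathz ticks, so rtick == realstathz
		 * and the expression reduces to FSCALE, i.e. 100%.
		 */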
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
	kseq_notify(ke, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= ksg_maxid; i++)
		total += KSEQ_GROUP(i)->ksg_load;
	return (total);
#else
	return (KSEQ_SELF()->ksq_sysload);
#endif
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}