/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121872 2003-11-02 04:25:59Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callout to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu
#define	ke_assign	ke_procq.tqe_next

#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */

struct kg_sched {
	int	skg_slptime;	/* Number of ticks we vol. slept */
	int	skg_runtime;	/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
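
/*
 * Worked example (assuming the stock 5.x priority ranges, where
 * PRI_MIN_TIMESHARE is 160 and PRI_MAX_TIMESHARE is 223, giving
 * SCHED_PRI_RANGE == 64): an interactivity score of 50 maps to
 * SCHED_PRI_INTERACT(50) == 50 * 64 / 100 == 32, so sched_priority()
 * below computes 160 + 32 == 192 before the nice value is added.
 */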

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per unit of val in [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN		(slice_min)
#define	SCHED_SLICE_MAX		(slice_max)
#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE	(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
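
/*
 * Worked example (assuming hz == 1000, so sched_setup() picks
 * slice_min == 10 and slice_max == 142 ticks, and assuming the stock
 * 41-entry nice range so SCHED_SLICE_NTHRESH == 19): a kse exactly as
 * nice as the least nice kse on the queue (scaled nice 0) receives
 * SLICE_NICE(0) == 142 ticks, while scaled nice 10 receives
 * 142 - (10 * 133) / 19 == 142 - 70 == 72 ticks, shrinking roughly
 * linearly toward SLICE_MIN at the threshold.
 */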

/*
 * These macros determine whether the kse belongs on the current or next
 * run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
    SCHED_INTERACTIVE(kg))
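
/*
 * Note that SCHED_CURR() also selects the current queue whenever
 * td_priority differs from kg_user_pri; this appears intended to catch
 * threads whose priority has been propagated up (see the priority
 * propagation handling in sched_prio() and sched_add() below), so that
 * lenders run promptly instead of waiting for a queue switch.
 */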

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;	/* Slices on run queue */
	int		ksq_cpus;	/* Count of CPUs in this kseq. */
	struct kse	*ksq_assigned;	/* KSEs assigned by another CPU. */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
static int kseq_idle;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq	*kseq_idmap[MAXCPU];
#define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(kseq_idmap[(x)])
#else
static struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
static void sched_interact_fork(struct ksegrp *kg);
static void sched_pctcpu_update(struct kse *ke);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
#if 0
static int sched_pickcpu(void);
#endif
static struct kse *runq_steal(struct runq *rq);
static struct kseq *kseq_load_highest(void);
static void kseq_balance(void *arg);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_find(void);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload:           %d\n", kseq->ksq_load);
	printf("\tload ITHD:      %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME:  %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE:      %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

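/*
 * Account for a kse entering a run queue: bump the per-class and
 * aggregate load counters and, for timeshare kses, the nice
 * distribution that sched_slice() sizes slices against.  kseq_rem()
 * undoes the same accounting.
 */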
static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
	}
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

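/*
 * ksq_nice[] counts the runnable timeshare kses at each nice value,
 * offset by SCHED_PRI_NHALF so that nice -20..20 maps to indices 0..40.
 * ksq_nicemin caches the smallest nice value present; sched_slice()
 * sizes slices relative to it.
 */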
static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm favors
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 */
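/*
 * For example, once high_load has been adjusted down by ksq_cpus,
 * loads of 5 and 1 give diff == 4 and move two kses; loads of 4 and 1
 * give diff == 3, which the odd-diff rounding below bumps so that two
 * kses still move.
 */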
static void
kseq_balance(void *arg)
{
	struct kseq *kseq;
	int high_load;
	int low_load;
	int high_cpu;
	int low_cpu;
	int move;
	int diff;
	int i;

	high_cpu = 0;
	low_cpu = 0;
	high_load = 0;
	low_load = -1;

	mtx_lock_spin(&sched_lock);
	if (smp_started == 0)
		goto out;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > high_load) {
			high_load = kseq->ksq_load;
			high_cpu = i;
		}
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	kseq = KSEQ_CPU(high_cpu);

	high_load = kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME];
	/*
	 * Nothing to do.
	 */
	if (high_load < kseq->ksq_cpus + 1)
		goto out;

	high_load -= kseq->ksq_cpus;

	if (low_load >= high_load)
		goto out;

	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;

	for (i = 0; i < move; i++)
		kseq_move(kseq, low_cpu);

out:
	mtx_unlock_spin(&sched_lock);
	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

	return;
}

static struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	kseq = KSEQ_CPU(cpu);

	if ((kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME]) > kseq->ksq_cpus)
		return (kseq);

	return (NULL);
}

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kse *ke;

	ke = kseq_steal(from);
	runq_remove(ke->ke_runq, ke);
	ke->ke_state = KES_THREAD;
	kseq_rem(from, ke);

	ke->ke_cpu = cpu;
	sched_add(ke->ke_thread);
}

static int
kseq_find(void)
{
	struct kseq *high;

	if (!smp_started)
		return (0);
	if (kseq_idle & PCPU_GET(cpumask))
		return (0);
	/*
	 * Find the cpu with the highest load and steal one proc.
	 */
	if ((high = kseq_load_highest()) == NULL ||
	    high == KSEQ_SELF()) {
		/*
		 * If we couldn't find one, set ourselves in the
		 * idle map.
		 */
		atomic_set_int(&kseq_idle, PCPU_GET(cpumask));
		return (0);
	}
	/*
	 * Remove this kse from this kseq and runq and then requeue
	 * on the current processor.  We now have a load of one!
	 */
	kseq_move(high, PCPU_GET(cpuid));

	return (1);
}

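/*
 * kseq_assign() and kseq_notify() cooperate through ksq_assigned, a
 * lock-free singly linked list built with atomic compare-and-swap:
 * notifiers push kses onto the list from remote cpus, and the owning
 * cpu atomically swaps the whole list out here and requeues each entry.
 */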
static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		ke = kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		ke->ke_flags &= ~KEF_ASSIGNED;
		sched_add(ke->ke_thread);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;

	ke->ke_flags |= KEF_ASSIGNED;

	kseq = KSEQ_CPU(cpu);

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		ke->ke_assign = kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

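/*
 * Scan a run queue's status bits for occupied queues and return the
 * first kse found that may be migrated; interrupt threads are never
 * stolen.
 */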
static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1 << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (PRI_BASE(ke->ke_ksegrp->kg_pri_class) !=
				    PRI_ITHD)
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq)
{
	struct kse *ke;

	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	return (runq_steal(&kseq->ksq_idle));
}
#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
	kseq->ksq_assigned = NULL;
#endif
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	/* init kseqs */
	/* Create the idmap. */
#ifdef ULE_HTT_EXPERIMENTAL
	if (smp_topology == NULL) {
#else
	if (1) {
#endif
		for (i = 0; i < MAXCPU; i++) {
			kseq_setup(&kseq_cpu[i]);
			kseq_idmap[i] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = 1;
		}
	} else {
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			struct cpu_group *cg;

			cg = &smp_topology->ct_group[i];
			kseq_setup(&kseq_cpu[i]);

			for (j = 0; j < MAXCPU; j++)
				if ((cg->cg_mask & (1 << j)) != 0)
					kseq_idmap[j] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = cg->cg_count;
		}
	}
	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
	kseq_balance(NULL);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if one of them abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
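	/*
	 * Example (assuming hz == 1000, so SLICE_MIN == 10 and
	 * SLICE_MAX == 142): with ksq_nicemin == -5, a kseg at nice 0
	 * has a scaled nice of 5 and receives SLICE_NICE(5) ==
	 * 142 - (5 * 133) / 19 == 107 ticks; a kseg at nice -5 matches
	 * nicemin and receives the full 142 ticks.
	 */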
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
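/*
 * For example, with hz == 100, SCHED_SLP_RUN_MAX is (100 * 5) << 10 ==
 * 512000.  A sum of 600000 is within 6/5ths of the limit, so both
 * values are scaled by 4/5; a sum of 700000 exceeds it and both are
 * halved instead.
 */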
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

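/*
 * Compute the interactivity score on a 0..SCHED_INTERACT_MAX scale:
 * ksegs that sleep more than they run score below SCHED_INTERACT_HALF
 * and ksegs that run more than they sleep score above it.  For example,
 * kg_slptime == 100 and kg_runtime == 25 gives div == 2 and a score of
 * 25 / 2 == 12 (interactive), while kg_runtime == 100 and
 * kg_slptime == 50 gives 50 + (50 - 50 / 2) == 75 (not interactive).
 */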
static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can only happen if slptime and runtime are equal, e.g.
	 * when both are zero.
	 */
	return (0);
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

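/*
 * Rescale the sampled tick count so that it covers exactly the last
 * SCHED_CPU_TICKS window.  If the kse has not run at all within the
 * window, the history is simply discarded.
 */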
static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

#if 0
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = -1;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (load == -1 || kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_ULE, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke);
		}
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td)
{
	struct thread *newtd;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		if (td->td_proc->p_flag & P_SA) {
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
			setrunqueue(td);
		} else {
			/*
			 * This queue is always correct except for idle threads
			 * which have a higher priority due to priority
			 * propagation.
			 */
			if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) {
				if (td->td_priority < PRI_MIN_IDLE)
					ke->ke_runq = KSEQ_SELF()->ksq_curr;
				else
					ke->ke_runq = &KSEQ_SELF()->ksq_idle;
			}
			runq_add(ke->ke_runq, ke);
			/* setrunqueue(td); */
		}
	} else {
		if (ke->ke_runq)
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
		/*
		 * We will not be on the run queue.  So we must be
		 * sleeping or similar.
		 */
		if (td->td_proc->p_flag & P_SA)
			kse_reassign(ke);
	}
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

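/*
 * Change a ksegrp's nice value and propagate the change: fix up the
 * per-kseq nice accounting for any runnable kses, recompute the user
 * priority, and ask every thread in the group to reschedule.
 */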
void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_runq == NULL)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	child->ke_ticks = ke->ke_ticks;
	child->ke_ltick = ke->ke_ltick;
	child->ke_ftick = ke->ke_ftick;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* kg->kg_slptime += child->kg_slptime; */
	kg->kg_runtime += child->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
	kseq = KSEQ_SELF();
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
retry:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_find())
				goto retry;
#endif
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}
#ifdef SMP
	if (kseq_find())
		goto retry;
#endif

	return (NULL);
}

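/*
 * Select a run queue for the thread based on its scheduling class and
 * enqueue it.  Timeshare and idle kses with affinity for another cpu
 * are handed off via kseq_notify() instead of being queued locally.
 */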
void
sched_add(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	kseq = KSEQ_SELF();
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	class = PRI_BASE(kg->kg_pri_class);
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
#ifdef SMP
		if (ke->ke_cpu != PCPU_GET(cpuid)) {
			kseq_notify(ke, ke->ke_cpu);
			return;
		}
#endif
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
#ifdef SMP
		if (ke->ke_cpu != PCPU_GET(cpuid)) {
			kseq_notify(ke, ke->ke_cpu);
			return;
		}
#endif
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * If there are any idle processors, give them our extra load.
	 */
	if (kseq_idle && class != PRI_ITHD &&
	    (kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME]) >= kseq->ksq_cpus) {
		int cpu;

		/*
		 * Multiple cpus could find this bit simultaneously but the
		 * race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu) {
			cpu--;
			atomic_clear_int(&kseq_idle, 1 << cpu);
			ke->ke_cpu = cpu;
			ke->ke_runq = NULL;
			kseq_notify(ke, cpu);
			return;
		}
	}
	if (class == PRI_TIMESHARE || class == PRI_REALTIME)
		atomic_clear_int(&kseq_idle, PCPU_GET(cpumask));
#endif
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;
	/*
	 * It is safe to just return here because sched_rem() is only ever
	 * used in places where we're immediately going to add the
	 * kse back on again.  In that case it'll be added with the correct
	 * thread and priority when the caller drops the sched_lock.
	 */
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

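/*
 * Convert the tick history into a %cpu estimate for ps(1).  With
 * realstathz ticks per second, rtick / realstathz is the fraction of
 * cpu used, scaled into fixed point by FSCALE.
 */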
fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rticks per second? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}