/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
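/*
 * Overview (editorial note): this file implements the traditional 4BSD
 * timesharing scheduler.  Thread priorities are derived from an estimated
 * CPU usage (kg_estcpu) that grows while a thread runs (sched_clock()) and
 * decays while it sleeps; the decay constants are derived in the comments
 * preceding schedcpu() below.
 */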

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

#define KTR_4BSD        0x0

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#ifdef SMP
#define INVERSE_ESTCPU_WEIGHT   (8 * smp_cpus)
#else
#define INVERSE_ESTCPU_WEIGHT   8       /* 1 / (priorities per estcpu level). */
#endif
#define NICE_WEIGHT             1       /* Priorities per nice level. */

struct ke_sched {
        int             ske_cpticks;    /* (j) Ticks of cpu time. */
        struct runq     *ske_runq;      /* runq the kse is currently on */
};
#define ke_runq         ke_sched->ske_runq
#define KEF_BOUND       KEF_SCHED1

#define SKE_RUNQ_PCPU(ke) \
    ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)

/*
 * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
 * cpus.
 */
#define KSE_CAN_MIGRATE(ke) \
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
static struct ke_sched ke_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = NULL;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = NULL;

static int      sched_tdcnt;    /* Total runnable threads in the system. */
static int      sched_quantum;  /* Roundrobin scheduling quantum in ticks. */
#define SCHED_QUANTUM   (hz / 10)       /* Default sched quantum */

static struct callout roundrobin_callout;

static void     setup_runqs(void);
static void     roundrobin(void *arg);
static void     schedcpu(void);
static void     schedcpu_thread(void);
static void     sched_setup(void *dummy);
static void     maybe_resched(struct thread *td);
static void     updatepri(struct ksegrp *kg);
static void     resetpriority(struct ksegrp *kg);

static struct kproc_desc sched_kp = {
        "schedcpu",
        schedcpu_thread,
        NULL
};
SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;

#ifdef SMP
/*
 * Per-CPU run queues
 */
static struct runq runq_pcpu[MAXCPU];
#endif

static void
setup_runqs(void)
{
#ifdef SMP
        int i;

        for (i = 0; i < MAXCPU; ++i)
                runq_init(&runq_pcpu[i]);
#endif

        runq_init(&runq);
}

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;

        new_val = sched_quantum * tick;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < tick)
                return (EINVAL);
        sched_quantum = new_val / tick;
        hogticks = 2 * sched_quantum;
        return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof sched_quantum, sysctl_kern_quantum, "I",
        "Roundrobin scheduling quantum in microseconds");

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

        mtx_assert(&sched_lock, MA_OWNED);
        if (td->td_priority < curthread->td_priority && curthread->td_kse)
                curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
        mtx_lock_spin(&sched_lock);
        forward_roundrobin();
        mtx_unlock_spin(&sched_lock);
#endif

        callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *      90% of (kg_estcpu) usage in 5 * loadav time
 *      95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *      for (i = 0; i < (5 * loadavg); i++)
 *              kg_estcpu *= decay;
 * will compute
 *      kg_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *      decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *      decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *      decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *      b = 2 * loadavg
 * then
 *      decay = b / (b + 1)
 *
 * We now need to prove two things:
 *      1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *      2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *      For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *      For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *      ln(.1) =~ -2.30
 *
 * Proof of (1):
 *      Solve (factor)**(power) =~ .1 given power (5*loadav):
 *      solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                            QED
 *
 * Proof of (2):
 *      Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *      solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.          QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define loadfactor(loadav)      (2 * (loadav))
#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE;         /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *      1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define CCPU_SHIFT      11

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void)
{
        register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        struct thread *td;
        struct proc *p;
        struct kse *ke;
        struct ksegrp *kg;
        int awake, realstathz;

        realstathz = stathz ? stathz : hz;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                /*
                 * Prevent state changes and protect run queue.
                 */
                mtx_lock_spin(&sched_lock);
                /*
                 * Increment time in/out of memory.  We ignore overflow; with
                 * 16-bit int's (remember them?) overflow takes 45 days.
                 */
                p->p_swtime++;
                FOREACH_KSEGRP_IN_PROC(p, kg) {
                        awake = 0;
                        FOREACH_KSE_IN_GROUP(kg, ke) {
                                /*
                                 * Increment sleep time (if sleeping).  We
                                 * ignore overflow, as above.
                                 */
                                /*
                                 * The kse slptimes are not touched in wakeup
                                 * because the thread may not HAVE a KSE.
                                 */
                                if (ke->ke_state == KES_ONRUNQ) {
                                        awake = 1;
                                        ke->ke_flags &= ~KEF_DIDRUN;
                                } else if ((ke->ke_state == KES_THREAD) &&
                                    (TD_IS_RUNNING(ke->ke_thread))) {
                                        awake = 1;
                                        /* Do not clear KEF_DIDRUN */
                                } else if (ke->ke_flags & KEF_DIDRUN) {
                                        awake = 1;
                                        ke->ke_flags &= ~KEF_DIDRUN;
                                }

                                /*
                                 * ke_pctcpu is only for ps and ttyinfo().
                                 * Do it per kse, and add them up at the end?
                                 * XXXKSE
                                 */
                                ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
                                    FSHIFT;
                                /*
                                 * If the kse has been idle the entire second,
                                 * stop recalculating its priority until
                                 * it wakes up.
                                 */
                                if (ke->ke_sched->ske_cpticks == 0)
                                        continue;
#if     (FSHIFT >= CCPU_SHIFT)
                                ke->ke_pctcpu += (realstathz == 100)
                                    ? ((fixpt_t) ke->ke_sched->ske_cpticks) <<
                                    (FSHIFT - CCPU_SHIFT) :
                                    100 * (((fixpt_t) ke->ke_sched->ske_cpticks)
                                    << (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
                                ke->ke_pctcpu += ((FSCALE - ccpu) *
                                    (ke->ke_sched->ske_cpticks *
                                    FSCALE / realstathz)) >> FSHIFT;
#endif
                                ke->ke_sched->ske_cpticks = 0;
                        } /* end of kse loop */
                        /*
                         * If there are ANY running threads in this KSEGRP,
                         * then don't count it as sleeping.
                         */
                        if (awake) {
                                if (kg->kg_slptime > 1) {
                                        /*
                                         * In an ideal world, this should not
                                         * happen, because whoever woke us
                                         * up from the long sleep should have
                                         * unwound the slptime and reset our
                                         * priority before we run at the stale
                                         * priority.  Should KASSERT at some
                                         * point when all the cases are fixed.
                                         */
                                        updatepri(kg);
                                }
                                kg->kg_slptime = 0;
                        } else
                                kg->kg_slptime++;
                        if (kg->kg_slptime > 1)
                                continue;
                        kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
                        resetpriority(kg);
                        FOREACH_THREAD_IN_GROUP(kg, td) {
                                if (td->td_priority >= PUSER) {
                                        sched_prio(td, kg->kg_user_pri);
                                }
                        }
                } /* end of ksegrp loop */
                mtx_unlock_spin(&sched_lock);
        } /* end of process loop */
        sx_sunlock(&allproc_lock);
}

/*
 * Main loop for a kthread that executes schedcpu once a second.
 */
static void
schedcpu_thread(void)
{
        int nowake;

        for (;;) {
                schedcpu();
                tsleep(&nowake, curthread->td_priority, "-", hz);
        }
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay kg_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
        register fixpt_t loadfac;
        register unsigned int newcpu;

        loadfac = loadfactor(averunnable.ldavg[0]);
        if (kg->kg_slptime > 5 * loadfac)
                kg->kg_estcpu = 0;
        else {
                newcpu = kg->kg_estcpu;
                kg->kg_slptime--;       /* was incremented in schedcpu() */
                while (newcpu && --kg->kg_slptime)
                        newcpu = decay_cpu(loadfac, newcpu);
                kg->kg_estcpu = newcpu;
        }
        resetpriority(kg);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
        register unsigned int newpriority;
        struct thread *td;

        if (kg->kg_pri_class == PRI_TIMESHARE) {
                newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
                    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
                newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
                    PRI_MAX_TIMESHARE);
                kg->kg_user_pri = newpriority;
        }
        FOREACH_THREAD_IN_GROUP(kg, td) {
                maybe_resched(td);                      /* XXXKSE silly */
        }
}

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
        setup_runqs();

        if (sched_quantum == 0)
                sched_quantum = SCHED_QUANTUM;
        hogticks = 2 * sched_quantum;

        callout_init(&roundrobin_callout, 0);

        /* Kick off timeout driven events by calling first time. */
        roundrobin(NULL);

        /* Account for thread0. */
        sched_tdcnt++;
}

/* External interfaces start here */
int
sched_runnable(void)
{
#ifdef SMP
        return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else
        return runq_check(&runq);
#endif
}

int
sched_rr_interval(void)
{
        if (sched_quantum == 0)
                sched_quantum = SCHED_QUANTUM;
        return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (kg_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time kg_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT
 * (until MAXPRI is reached).  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
void
sched_clock(struct thread *td)
{
        struct ksegrp *kg;
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
        kg = td->td_ksegrp;
        ke = td->td_kse;

        ke->ke_sched->ske_cpticks++;
        kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
        if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
                resetpriority(kg);
                if (td->td_priority >= PUSER)
                        td->td_priority = kg->kg_user_pri;
        }
}

/*
 * Charge the child's scheduling CPU usage to the parent.
 *
 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait; since process estcpu is the sum
 * of all ksegrps, this is strictly as expected.  Assume that the child process
 * aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct proc *p, struct proc *p1)
{
        sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
        sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
        sched_exit_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

        mtx_assert(&sched_lock, MA_OWNED);
        kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
        if ((td->td_proc->p_flag & P_NOLOAD) == 0)
                sched_tdcnt--;
}

void
sched_fork(struct proc *p, struct proc *p1)
{
        sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
        sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
        sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{
        child->ke_sched->ske_cpticks = 0;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
        mtx_assert(&sched_lock, MA_OWNED);
        child->kg_estcpu = kg->kg_estcpu;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_nice(struct ksegrp *kg, int nice)
{

        PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED);
        kg->kg_nice = nice;
        resetpriority(kg);
}

void
sched_class(struct ksegrp *kg, int class)
{
        mtx_assert(&sched_lock, MA_OWNED);
        kg->kg_pri_class = class;
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

        mtx_assert(&sched_lock, MA_OWNED);
        if (TD_ON_RUNQ(td)) {
                adjustrunqueue(td, prio);
        } else {
                td->td_priority = prio;
        }
}

void
}

void
sched_switch(struct thread *td)
{
        struct thread *newtd;
        struct kse *ke;
        struct proc *p;

        ke = td->td_kse;
        p = td->td_proc;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((ke->ke_state == KES_THREAD), ("sched_switch: kse state?"));

        if ((p->p_flag & P_NOLOAD) == 0)
                sched_tdcnt--;
        td->td_lastcpu = td->td_oncpu;
        td->td_last_kse = ke;
        td->td_flags &= ~TDF_NEEDRESCHED;
        td->td_oncpu = NOCPU;
        /*
         * At the last moment, if this thread is still marked RUNNING,
         * then put it back on the run queue as it has not been suspended
         * or stopped or anything else similar.
         */
        if (TD_IS_RUNNING(td)) {
                /* Put us back on the run queue (kse and all). */
                setrunqueue(td);
        } else if (p->p_flag & P_SA) {
                /*
                 * We will not be on the run queue.  So we must be
                 * sleeping or similar.  As it's available,
                 * someone else can use the KSE if they need it.
                 */
                kse_reassign(ke);
        }
        newtd = choosethread();
        if (td != newtd)
                cpu_switch(td, newtd);
        sched_lock.mtx_lock = (uintptr_t)td;
        td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_wakeup(struct thread *td)
{
        struct ksegrp *kg;

        mtx_assert(&sched_lock, MA_OWNED);
        kg = td->td_ksegrp;
        if (kg->kg_slptime > 1)
                updatepri(kg);
        kg->kg_slptime = 0;
        setrunqueue(td);
        maybe_resched(td);
}

void
sched_add(struct thread *td)
{
        struct kse *ke;

        ke = td->td_kse;
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
        KASSERT((ke->ke_thread->td_kse != NULL),
            ("sched_add: No KSE on thread"));
        KASSERT(ke->ke_state != KES_ONRUNQ,
            ("sched_add: kse %p (%s) already in run queue", ke,
            ke->ke_proc->p_comm));
        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("sched_add: process swapped out"));
        ke->ke_ksegrp->kg_runq_kses++;
        ke->ke_state = KES_ONRUNQ;

#ifdef SMP
        if (KSE_CAN_MIGRATE(ke)) {
                CTR1(KTR_4BSD, "adding kse:%p to gbl runq", ke);
                ke->ke_runq = &runq;
        } else {
                CTR1(KTR_4BSD, "adding kse:%p to pcpu runq", ke);
                if (!SKE_RUNQ_PCPU(ke))
                        ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
        }
#else
        ke->ke_runq = &runq;
#endif
        if ((td->td_proc->p_flag & P_NOLOAD) == 0)
                sched_tdcnt++;
        runq_add(ke->ke_runq, ke);
}

void
sched_rem(struct thread *td)
{
        struct kse *ke;

        ke = td->td_kse;
        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("sched_rem: process swapped out"));
        KASSERT((ke->ke_state == KES_ONRUNQ),
            ("sched_rem: KSE not on run queue"));
        mtx_assert(&sched_lock, MA_OWNED);

        if ((td->td_proc->p_flag & P_NOLOAD) == 0)
                sched_tdcnt--;
        runq_remove(ke->ke_sched->ske_runq, ke);

        ke->ke_state = KES_THREAD;
        ke->ke_ksegrp->kg_runq_kses--;
}

struct kse *
sched_choose(void)
{
        struct kse *ke;
        struct runq *rq;

#ifdef SMP
        struct kse *kecpu;

        rq = &runq;
        ke = runq_choose(&runq);
        kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);

        if (ke == NULL ||
            (kecpu != NULL &&
             kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
                CTR2(KTR_4BSD, "choosing kse %p from pcpu runq %d", kecpu,
                     PCPU_GET(cpuid));
                ke = kecpu;
                rq = &runq_pcpu[PCPU_GET(cpuid)];
        } else {
                CTR1(KTR_4BSD, "choosing kse %p from main runq", ke);
        }

#else
        rq = &runq;
        ke = runq_choose(&runq);
#endif

        if (ke != NULL) {
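                /*
                 * A runnable kse was found: take it off the run queue it
                 * was chosen from and mark it as actively backing a thread
                 * before handing it back to the caller.
                 */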
                runq_remove(rq, ke);
                ke->ke_state = KES_THREAD;

                KASSERT((ke->ke_thread != NULL),
                    ("sched_choose: No thread on KSE"));
                KASSERT((ke->ke_thread->td_kse != NULL),
                    ("sched_choose: No KSE on thread"));
                KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
                    ("sched_choose: process swapped out"));
        }
        return (ke);
}

void
sched_userret(struct thread *td)
{
        struct ksegrp *kg;
        /*
         * XXX we cheat slightly on the locking here to avoid locking in
         * the usual case.  Setting td_priority here is essentially an
         * incomplete workaround for not setting it properly elsewhere.
         * Now that some interrupt handlers are threads, not setting it
         * properly elsewhere can clobber it in the window between setting
         * it here and returning to user mode, so don't waste time setting
         * it perfectly here.
         */
        kg = td->td_ksegrp;
        if (td->td_priority != kg->kg_user_pri) {
                mtx_lock_spin(&sched_lock);
                td->td_priority = kg->kg_user_pri;
                mtx_unlock_spin(&sched_lock);
        }
}

void
sched_bind(struct thread *td, int cpu)
{
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(TD_IS_RUNNING(td),
            ("sched_bind: cannot bind non-running thread"));

        ke = td->td_kse;

        ke->ke_flags |= KEF_BOUND;
#ifdef SMP
        ke->ke_runq = &runq_pcpu[cpu];
        if (PCPU_GET(cpuid) == cpu)
                return;

        ke->ke_state = KES_THREAD;

        mi_switch(SW_VOL);
#endif
}

void
sched_unbind(struct thread* td)
{
        mtx_assert(&sched_lock, MA_OWNED);
        td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_load(void)
{
        return (sched_tdcnt);
}

int
sched_sizeof_kse(void)
{
        return (sizeof(struct kse) + sizeof(struct ke_sched));
}
int
sched_sizeof_ksegrp(void)
{
        return (sizeof(struct ksegrp));
}
int
sched_sizeof_proc(void)
{
        return (sizeof(struct proc));
}
int
sched_sizeof_thread(void)
{
        return (sizeof(struct thread));
}

fixpt_t
sched_pctcpu(struct thread *td)
{
        struct kse *ke;

        ke = td->td_kse;
        if (ke == NULL)
                ke = td->td_last_kse;
        if (ke)
                return (ke->ke_pctcpu);

        return (0);
}