sched_ule.c revision 113417
/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/sched_ule.c 113417 2003-04-12 22:33:24Z jeff $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
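/*
 * Illustrative arithmetic for the constant above: ccpu is exp(-1/20) in
 * fixed-point form.  Applied once per second, sixty applications give
 * exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95% of p_pctcpu has
 * decayed after 60 seconds, matching the comment above.
 */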
static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 2;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;	/* Number of ticks we vol. slept */
	int	skg_runtime;	/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * This priority range has 20 priorities on either end that are reachable
 * only through nice values.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Reserved priorities for nice.
 * PRI_BASE:	The start of the dynamic range.
 * DYN_RANGE:	Number of priorities that are available in the dynamic
 *		priority range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		((SCHED_PRI_NRESV / 2) + PRI_MIN_TIMESHARE)
#define	SCHED_DYN_RANGE		(SCHED_PRI_RANGE - SCHED_PRI_NRESV)
#define	SCHED_PRI_INTERACT(score)					\
	((score) * SCHED_DYN_RANGE / SCHED_INTERACT_RANGE)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time.
 * INTERACT_RANGE:	Range of interactivity values.  Smaller is better.
 * INTERACT_HALF:	Convenience define, half of the interactivity range.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz / 10) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(10)
#define	SCHED_INTERACT_RANGE	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_RANGE / 2)
#define	SCHED_INTERACT_THRESH	(10)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of
 *		[0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN		(slice_min)
#define	SCHED_SLICE_MAX		(slice_max)
#define	SCHED_SLICE_RANGE	(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
	(SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
	(sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
	(ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))
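/*
 * Illustrative slice arithmetic (assuming hz = 100 and the usual nice
 * range of -20..20, so PRIO_TOTAL = 40 and SCHED_PRI_NTHRESH = 19):
 * sched_setup() below sets slice_min = hz / 100 = 1 and
 * slice_max = hz / 10 = 10, giving SCHED_SLICE_RANGE = 10.  A kse whose
 * nice value sits 10 points above the least nice kse on its queue gets
 * SCHED_SLICE_NICE(10) = 10 - (10 * 10) / 19 = 5 ticks, while the least
 * nice kse itself (offset 0) gets the full 10 ticks.
 */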
/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;	/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(struct kseq *kseq);
#ifdef SMP
struct kseq * kseq_load_highest(void);
#endif

void
kseq_print(struct kseq *kseq)
{
	int i;

	if (kseq == NULL)
		kseq = KSEQ_SELF();

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}
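/*
 * Illustrative note on the nice bookkeeping below: ksq_nice is indexed
 * by nice + SCHED_PRI_NHALF, which (assuming the usual nice range of
 * -20..20, PRIO_TOTAL = 40) maps nice values onto bins 0..40.  E.g. a
 * kse at nice -20 lands in bin 0, nice 0 in bin 20, and nice +20 in
 * bin 40.
 */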
static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 0)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	if (load > 1)
		return (KSEQ_CPU(cpu));

	return (NULL);
}
#endif

struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
	int i;

	slice_min = (hz/100);
	slice_max = (hz/10);

	mtx_lock_spin(&sched_lock);
	/* init kseqs */
	for (i = 0; i < MAXCPU; i++)
		kseq_setup(KSEQ_CPU(i));

	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}
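/*
 * Illustrative priority arithmetic for sched_priority() below, assuming
 * the 5.x constants PRI_MIN_TIMESHARE = 160, PRI_MAX_TIMESHARE = 223 and
 * PRIO_TOTAL = 40: then SCHED_PRI_RANGE = 64, SCHED_DYN_RANGE = 24 and
 * SCHED_PRI_BASE = 180.  A perfectly interactive ksegrp (score 0) at
 * nice 0 gets priority 180; the worst score (99) maps to
 * 99 * 24 / 100 + 180 = 203, and nice then shifts the result by up to
 * +/-20 before clamping to [160, 223].
 */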
/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 */

	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	CTR4(KTR_ULE, "Slp vs Run %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime /= SCHED_SLP_RUN_THROTTLE;
		kg->kg_slptime /= SCHED_SLP_RUN_THROTTLE;
	}
	CTR4(KTR_ULE, "Slp vs Run(2) %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	return;
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int big;
	int small;
	int base;

	if (kg->kg_runtime > kg->kg_slptime) {
		big = kg->kg_runtime;
		small = kg->kg_slptime;
		base = SCHED_INTERACT_HALF;
	} else {
		big = kg->kg_slptime;
		small = kg->kg_runtime;
		base = 0;
	}

	big /= SCHED_INTERACT_HALF;
	if (big != 0)
		small /= big;
	else
		small = 0;

	small += base;
	/* XXX Factor in nice */
	return (small);
}
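/*
 * Illustrative scores for the function above: with SCHED_INTERACT_HALF
 * = 50, a ksegrp that slept for 10 ticks and ran for 1 tick
 * (kg_slptime = 10240, kg_runtime = 1024 in the <<10 fixed-point units
 * used by this file) scores 1024 / (10240 / 50) = 5, comfortably under
 * SCHED_INTERACT_THRESH.  Inverting the ratio (ran 10, slept 1) scores
 * 50 + 5 = 55.  Sleep-heavy groups therefore land in [0, 50) and
 * run-heavy groups in [50, 100).
 */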
/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 *
	 * Shift the tick count out so that the divide doesn't round away
	 * our results.
	 */
	ke->ke_ticks <<= 10;
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
	    SCHED_CPU_TICKS;
	ke->ke_ticks >>= 10;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	if (!smp_started)
		return (0);

	load = -1;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		/* Track the least loaded cpu seen so far. */
		if (load == -1 || kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		runq_add(ke->ke_runq, ke);
		/* setrunqueue(td); */
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue.  So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_THREADED)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);

	if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
	    td->td_priority != td->td_ksegrp->kg_user_pri)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_state != KES_ONRUNQ &&
			    ke->ke_state != KES_THREAD)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}
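/*
 * Illustrative note on units: td_slptime above records the value of
 * `ticks' at sleep time.  sched_wakeup() below credits the ksegrp with
 * (ticks - td_slptime) << 10, matching the <<10 fixed-point convention
 * sched_clock() uses for kg_runtime, so a thread that sleeps for 100
 * ticks adds 102400 to kg_slptime.
 */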
void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_priority(kg);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{
	child->ke_slice = ke->ke_slice;
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	child->ke_ticks = 0;
	child->ke_ltick = ticks;
	child->ke_ftick = ticks - hz;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* XXX Need something better here */
	if (kg->kg_slptime > kg->kg_runtime) {
		child->kg_slptime = SCHED_DYN_RANGE;
		child->kg_runtime = kg->kg_slptime / SCHED_DYN_RANGE;
	} else {
		child->kg_runtime = SCHED_DYN_RANGE;
		child->kg_slptime = kg->kg_runtime / SCHED_DYN_RANGE;
	}

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}
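/*
 * Illustrative numbers for the XXX block above (assuming SCHED_DYN_RANGE
 * evaluates to 24 with the 5.x priority constants): a sleep-heavy parent
 * with kg_slptime = 48000 hands its child kg_slptime = 24 and
 * kg_runtime = 48000 / 24 = 2000, i.e. the child starts out looking
 * run-heavy and must earn interactivity on its own.
 */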
void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct thread *td;
#if 0
	struct kse *nke;
#endif

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	td = ke->ke_thread;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_kse->ke_flags & KEF_IDLEKSE)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * Check for a higher priority task on the run queue.  This can happen
	 * on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
#if 0
	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
		if (sched_strict &&
		    nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
		else if (nke->ke_thread->td_priority <
		    td->td_priority + SCHED_PRIO_SLOP)
			if (nke->ke_thread->td_priority < td->td_priority)
				td->td_flags |= TDF_NEEDRESCHED;
	}
#endif
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute
	 * our interactivity.
	 */
	kg->kg_runtime += tickincr << 10;

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}
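/*
 * Illustrative: sched_clock() above runs once per stat clock tick, but
 * slices are measured in hz ticks, so each stat tick is charged as
 * tickincr = hz / realstathz ticks.  E.g. hz = 1000 and stathz = 128
 * give tickincr = 7, and kg_runtime grows by 7 << 10 = 7168 per stat
 * tick.
 */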
int
sched_runnable(void)
{
	struct kseq *kseq;

	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		return (1);
#ifdef SMP
	/*
	 * For SMP we may steal other processors' KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i))
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load)
				return (1);
		}
	}
#endif
	return (0);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		ke = kseq_choose(kseq);
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;
		kseq_rem(kseq, ke);

		ke->ke_cpu = PCPU_GET(cpuid);
		sched_add(ke);
		goto retry;
	}
#endif

	return (NULL);
}

void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	kg = ke->ke_ksegrp;

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MAX_TIMESHARE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	if (ke->ke_ticks) {
		int rtick;

		/* Update to account for time potentially spent sleeping */
		ke->ke_ltick = ticks;
		sched_pctcpu_update(ke);

		/* How many rtick per second ? */
		rtick = ke->ke_ticks / SCHED_CPU_TIME;
		pctcpu = (FSCALE * ((FSCALE * rtick) / realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}