sched_ule.c revision 121625
/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121625 2003-10-28 03:28:48Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callout to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These datastructures are allocated within their parent datastructure but
 * are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;	/* Number of ticks we vol. slept */
	int	skg_runtime;	/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower(better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:		Maximum amount of sleep time + run time we'll
 *			accumulate before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(100)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
    SCHED_INTERACTIVE(kg))
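
/*
 * Illustrative example, assuming the stock values above: a ksegrp with an
 * interactivity score of 10 is below SCHED_INTERACT_THRESH (30), so
 * SCHED_INTERACTIVE() is true and SCHED_CURR() places it on the current run
 * queue.  sched_priority() then computes its user priority as
 * PRI_MIN_TIMESHARE + 10 * SCHED_PRI_RANGE / 100 + nice, i.e. near the best
 * (numerically lowest) end of the timeshare range, while a score of
 * SCHED_INTERACT_MAX maps to the worst end before nice and clamping are
 * applied.
 */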

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int		ksq_cpus;	/* Count of CPUs in this kseq. */
	unsigned int	ksq_rslices;	/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
struct kseq	*kseq_idmap[MAXCPU];
#define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(kseq_idmap[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq, int steal);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
struct kseq * kseq_load_highest(void);
void kseq_balance(void *arg);
void kseq_move(struct kseq *from, int cpu);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}
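
/*
 * Note on the nice bookkeeping above: ksq_nice[] is indexed by
 * nice + SCHED_PRI_NHALF, so with the usual nice range of -20..20 a nice
 * value of -20 lands in bucket 0 and +20 in bucket 40.  ksq_nicemin tracks
 * the numerically smallest (least nice) value present on the queue, which
 * sched_slice() uses as the reference point when handing out slices.
 */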

#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
void
kseq_balance(void *arg)
{
	struct kseq *kseq;
	int high_load;
	int low_load;
	int high_cpu;
	int low_cpu;
	int move;
	int diff;
	int i;

	high_cpu = 0;
	low_cpu = 0;
	high_load = 0;
	low_load = -1;

	mtx_lock_spin(&sched_lock);
	if (smp_started == 0)
		goto out;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > high_load) {
			high_load = kseq->ksq_load;
			high_cpu = i;
		}
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	kseq = KSEQ_CPU(high_cpu);

	/*
	 * Nothing to do.
	 */
	if (high_load < kseq->ksq_cpus + 1)
		goto out;

	high_load -= kseq->ksq_cpus;

	if (low_load >= high_load)
		goto out;

	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;

	for (i = 0; i < move; i++)
		kseq_move(kseq, low_cpu);

out:
	mtx_unlock_spin(&sched_lock);
	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

	return;
}
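
/*
 * Example of the move calculation above, assuming one CPU per kseq: with a
 * busiest-queue load of 5 and an idlest load of 1, high_load is first
 * reduced by ksq_cpus to 4 (one thread per CPU can stay put), diff is 3,
 * and move rounds up to 2, so two KSEs are migrated to the idle CPU.
 */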

struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	kseq = KSEQ_CPU(cpu);

	if (load > kseq->ksq_cpus)
		return (kseq);

	return (NULL);
}

void
kseq_move(struct kseq *from, int cpu)
{
	struct kse *ke;

	ke = kseq_choose(from, 1);
	runq_remove(ke->ke_runq, ke);
	ke->ke_state = KES_THREAD;
	kseq_rem(from, ke);

	ke->ke_cpu = cpu;
	sched_add(ke->ke_thread);
}
#endif

/*
 * Pick the highest priority task we have and return it.  If steal is 1 we
 * will return kses that have been denied slices due to their nice being too
 * low.  In the future we should prohibit stealing interrupt threads as well.
 */

struct kse *
kseq_choose(struct kseq *kseq, int steal)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0 && steal == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	/* init kseqs */
	/* Create the idmap. */
#ifdef ULE_HTT_EXPERIMENTAL
	if (smp_topology == NULL) {
#else
	if (1) {
#endif
		for (i = 0; i < MAXCPU; i++) {
			kseq_setup(&kseq_cpu[i]);
			kseq_idmap[i] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = 1;
		}
	} else {
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			struct cpu_group *cg;

			cg = &smp_topology->ct_group[i];
			kseq_setup(&kseq_cpu[i]);

			for (j = 0; j < MAXCPU; j++)
				if ((cg->cg_mask & (1 << j)) != 0)
					kseq_idmap[j] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = cg->cg_count;
		}
	}
	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
	kseq_balance(NULL);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if one abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 *
	 */

	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	sched_interact_update(kg);

	return;
}
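
/*
 * Example of the slice assignment above, assuming hz = 1000 so that
 * slice_min is 10 ticks and slice_max is ~142 ticks: a non-interactive kseg
 * whose nice equals ksq_nicemin gets the full ~142 tick slice, one that is
 * SCHED_PRI_NTHRESH points above the least nice value gets roughly
 * slice_min, and anything outside that window gets a slice of 0 and is
 * re-sliced by kseq_choose() each time it is encountered.
 */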

static void
sched_interact_update(struct ksegrp *kg)
{
	int ratio;

	if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		ratio = ((SCHED_SLP_RUN_MAX * 15) / (kg->kg_runtime +
		    kg->kg_slptime ));
		kg->kg_runtime = (kg->kg_runtime * ratio) / 16;
		kg->kg_slptime = (kg->kg_slptime * ratio) / 16;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	}
	if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}
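
/*
 * A rough feel for the score above: a ksegrp that sleeps far more than it
 * runs scores near 0 and one that runs far more than it sleeps scores near
 * SCHED_INTERACT_MAX.  For instance, sleeping four times as long as running
 * yields roughly 50 * 1/4 = 12, well under SCHED_INTERACT_THRESH, while
 * running four times as long as sleeping yields roughly 100 - 50 * 1/4 = 87.
 */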

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = 0;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (ke && ((td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
		    prio < td->td_ksegrp->kg_user_pri) ||
		    (td->td_ksegrp->kg_pri_class == PRI_IDLE &&
		    prio < PRI_MIN_IDLE))) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke);
		}
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td)
{
	struct thread *newtd;
	u_int sched_nest;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		if (td->td_proc->p_flag & P_SA) {
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
			setrunqueue(td);
		} else {
			/*
			 * This queue is always correct except for idle threads
			 * which have a higher priority due to priority
			 * propagation.
			 */
			if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) {
				if (td->td_priority < PRI_MIN_IDLE)
					ke->ke_runq = KSEQ_SELF()->ksq_curr;
				else
					ke->ke_runq = &KSEQ_SELF()->ksq_idle;
			}
			runq_add(ke->ke_runq, ke);
			/* setrunqueue(td); */
		}
	} else {
		if (ke->ke_runq)
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
		/*
		 * We will not be on the run queue.  So we must be
		 * sleeping or similar.
		 */
		if (td->td_proc->p_flag & P_SA)
			kse_reassign(ke);
	}
	sched_nest = sched_lock.mtx_recurse;
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_runq == NULL)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_interact_update(kg);
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
930 */ 931void 932sched_fork(struct proc *p, struct proc *p1) 933{ 934 935 mtx_assert(&sched_lock, MA_OWNED); 936 937 sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1)); 938 sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1)); 939 sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1)); 940} 941 942void 943sched_fork_kse(struct kse *ke, struct kse *child) 944{ 945 946 child->ke_slice = 1; /* Attempt to quickly learn interactivity. */ 947 child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */ 948 child->ke_runq = NULL; 949 950 /* Grab our parents cpu estimation information. */ 951 child->ke_ticks = ke->ke_ticks; 952 child->ke_ltick = ke->ke_ltick; 953 child->ke_ftick = ke->ke_ftick; 954} 955 956void 957sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child) 958{ 959 960 PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED); 961 /* XXX Need something better here */ 962 963 child->kg_slptime = kg->kg_slptime / SCHED_SLP_RUN_THROTTLE; 964 child->kg_runtime = kg->kg_runtime / SCHED_SLP_RUN_THROTTLE; 965 kg->kg_runtime += tickincr << 10; 966 sched_interact_update(kg); 967 968 child->kg_user_pri = kg->kg_user_pri; 969 child->kg_nice = kg->kg_nice; 970} 971 972void 973sched_fork_thread(struct thread *td, struct thread *child) 974{ 975} 976 977void 978sched_class(struct ksegrp *kg, int class) 979{ 980 struct kseq *kseq; 981 struct kse *ke; 982 983 mtx_assert(&sched_lock, MA_OWNED); 984 if (kg->kg_pri_class == class) 985 return; 986 987 FOREACH_KSE_IN_GROUP(kg, ke) { 988 if (ke->ke_state != KES_ONRUNQ && 989 ke->ke_state != KES_THREAD) 990 continue; 991 kseq = KSEQ_CPU(ke->ke_cpu); 992 993 kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--; 994 kseq->ksq_loads[PRI_BASE(class)]++; 995 996 if (kg->kg_pri_class == PRI_TIMESHARE) 997 kseq_nice_rem(kseq, kg->kg_nice); 998 else if (class == PRI_TIMESHARE) 999 kseq_nice_add(kseq, kg->kg_nice); 1000 } 1001 1002 kg->kg_pri_class = class; 1003} 1004 1005/* 1006 * Return some of the child's priority and interactivity to the parent. 1007 */ 1008void 1009sched_exit(struct proc *p, struct proc *child) 1010{ 1011 /* XXX Need something better here */ 1012 mtx_assert(&sched_lock, MA_OWNED); 1013 sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child)); 1014 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child)); 1015} 1016 1017void 1018sched_exit_kse(struct kse *ke, struct kse *child) 1019{ 1020 kseq_rem(KSEQ_CPU(child->ke_cpu), child); 1021} 1022 1023void 1024sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1025{ 1026 /* kg->kg_slptime += child->kg_slptime; */ 1027 kg->kg_runtime += child->kg_runtime; 1028 sched_interact_update(kg); 1029} 1030 1031void 1032sched_exit_thread(struct thread *td, struct thread *child) 1033{ 1034} 1035 1036void 1037sched_clock(struct thread *td) 1038{ 1039 struct kseq *kseq; 1040 struct ksegrp *kg; 1041 struct kse *ke; 1042 1043 /* 1044 * sched_setup() apparently happens prior to stathz being set. We 1045 * need to resolve the timers earlier in the boot so we can avoid 1046 * calculating this here. 1047 */ 1048 if (realstathz == 0) { 1049 realstathz = stathz ? stathz : hz; 1050 tickincr = hz / realstathz; 1051 /* 1052 * XXX This does not work for values of stathz that are much 1053 * larger than hz. 
void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
	kseq = KSEQ_SELF();
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();

	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
#ifdef SMP
	/*
	 * For SMP we may steal other processors' KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load > kseq->ksq_cpus)
				goto out;
		}
	}
#endif
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq, 0);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		kseq_move(kseq, PCPU_GET(cpuid));
		goto retry;
	}
#endif

	return (NULL);
}

void
sched_add(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	ke = td->td_kse;
	kg = td->td_ksegrp;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));


	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rtick per second ? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}