sched_ule.c revision 121126
/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121126 2003-10-16 08:17:43Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callout to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These data structures are allocated within their parent data structure but
 * are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower(better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
	((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

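/*
 * Illustrative example, assuming the stock 5.x priority ranges where
 * PRI_MIN_TIMESHARE is 160 and PRI_MAX_TIMESHARE is 223 (so SCHED_PRI_RANGE
 * is 64): a ksegrp with an interactivity score of 25 and a nice of 0 is
 * assigned 160 + (25 * 64 / 100) = 176 by sched_priority(), while the same
 * score at nice 20 maps to 196.  The result is always clamped back into the
 * timeshare range.
 */
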
/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(100)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per unit of val in the
 *		range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
	(SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))

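/*
 * Illustrative example, assuming hz = 1000: sched_setup() sets slice_min to
 * 10 and slice_max to 142 ticks, so SCHED_SLICE_RANGE is 133.  With the
 * usual nice range of -20..20 (PRIO_TOTAL = 40), SCHED_PRI_NTHRESH is 19.
 * A timeshare kse whose nice equals the queue's ksq_nicemin gets
 * SCHED_SLICE_NICE(0) = 142 ticks; one that is 10 nice levels above the
 * least nice gets 142 - (10 * 133) / 19 = 72 ticks; anything more than 19
 * levels above receives a zero slice from sched_slice() and is re-evaluated
 * each time it is selected.
 */
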
/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int		ksq_cpus;	/* Count of CPUs in this kseq. */
	unsigned int	ksq_rslices;	/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
struct kseq	*kseq_idmap[MAXCPU];
#define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(kseq_idmap[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq, int steal);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
struct kseq * kseq_load_highest(void);
void kseq_balance(void *arg);
void kseq_move(struct kseq *from, int cpu);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm favors
 * algorithmic simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
void
kseq_balance(void *arg)
{
	struct kseq *kseq;
	int high_load;
	int low_load;
	int high_cpu;
	int low_cpu;
	int move;
	int diff;
	int i;

	high_cpu = 0;
	low_cpu = 0;
	high_load = 0;
	low_load = -1;

	mtx_lock_spin(&sched_lock);
	if (smp_started == 0)
		goto out;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > high_load) {
			high_load = kseq->ksq_load;
			high_cpu = i;
		}
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	kseq = KSEQ_CPU(high_cpu);

	/*
	 * Nothing to do.
	 */
	if (high_load < kseq->ksq_cpus + 1)
		goto out;

	high_load -= kseq->ksq_cpus;

	if (low_load >= high_load)
		goto out;

	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;

	for (i = 0; i < move; i++)
		kseq_move(kseq, low_cpu);

out:
	mtx_unlock_spin(&sched_lock);
	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

	return;
}

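/*
 * Illustrative example: on a two processor machine with one cpu per kseq
 * (ksq_cpus == 1), suppose CPU 0 has a load of 5 and CPU 1 a load of 1.
 * After discounting the kse that can run locally, high_load becomes 4,
 * diff is 4 - 1 = 3, and move rounds up to 2, so two kses are migrated
 * from CPU 0 to CPU 1 by kseq_move().
 */
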
struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	kseq = KSEQ_CPU(cpu);

	if (load > kseq->ksq_cpus)
		return (kseq);

	return (NULL);
}

void
kseq_move(struct kseq *from, int cpu)
{
	struct kse *ke;

	ke = kseq_choose(from, 1);
	runq_remove(ke->ke_runq, ke);
	ke->ke_state = KES_THREAD;
	kseq_rem(from, ke);

	ke->ke_cpu = cpu;
	sched_add(ke);
}
#endif

/*
 * Pick the highest priority task we have and return it.  If steal is 1 we
 * will return kses that have been denied slices due to their nice being too
 * low.  In the future we should prohibit stealing interrupt threads as well.
 */

struct kse *
kseq_choose(struct kseq *kseq, int steal)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0 && steal == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	/* init kseqs */
	/* Create the idmap. */
#ifdef ULE_HTT_EXPERIMENTAL
	if (smp_topology == NULL) {
#else
	if (1) {
#endif
		for (i = 0; i < MAXCPU; i++) {
			kseq_setup(&kseq_cpu[i]);
			kseq_idmap[i] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = 1;
		}
	} else {
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			struct cpu_group *cg;

			cg = &smp_topology->ct_group[i];
			kseq_setup(&kseq_cpu[i]);

			for (j = 0; j < MAXCPU; j++)
				if ((cg->cg_mask & (1 << j)) != 0)
					kseq_idmap[j] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = cg->cg_count;
		}
	}
	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
	kseq_balance(NULL);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the ksegs nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 *
	 */

	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	sched_interact_update(kg);

	return;
}

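/*
 * Illustrative example, assuming hz = 1000: SCHED_SLP_RUN_MAX is
 * (1000 * 5) << 10 = 5120000, i.e. five seconds of combined history in
 * ticks scaled by 2^10.  A kseg with kg_runtime = 6000000 and
 * kg_slptime = 2000000 is scaled by 4/5 twice by sched_interact_update(),
 * ending at 3840000 and 1280000; the 3:1 run/sleep ratio, and therefore
 * the interactivity score, is preserved while older history is forgotten.
 */
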
static void
sched_interact_update(struct ksegrp *kg)
{
	/* XXX Fixme, use a linear algorithm and not a while loop. */
	while ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime = (kg->kg_runtime / 5) * 4;
		kg->kg_slptime = (kg->kg_slptime / 5) * 4;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}

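/*
 * Illustrative example: a kseg that has slept for three units of time for
 * every one it has run, say kg_slptime = 3000000 and kg_runtime = 1000000,
 * gets div = 3000000 / 50 = 60000 and a score of 1000000 / 60000 = 16,
 * which is below SCHED_INTERACT_THRESH (30) and so counts as interactive.
 * Reversing the two values gives 50 + (50 - 16) = 84, which is decidedly
 * non-interactive.
 */
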
/*
 * This is only somewhat accurate since, given many processes of the same
 * priority, they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

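/*
 * Illustrative example, assuming hz = 1000: the averaging window is
 * SCHED_CPU_TICKS = 10000 hardclock ticks.  If a kse has only been tracked
 * for 5000 ticks (ticks - ke_ftick == 5000) and has accumulated 320 stat
 * clock ticks, the update extrapolates ke_ticks to roughly
 * 320 * (10000 / 5000) = 640, as if it had behaved the same way over a
 * full window, before moving ke_ftick to the start of the window.
 */
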
#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = 0;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		if (td->td_proc->p_flag & P_SA) {
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
			setrunqueue(td);
		} else {
			/*
			 * This queue is always correct except for idle threads
			 * which have a higher priority due to priority
			 * propagation.
			 */
			if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
			    ke->ke_thread->td_priority > PRI_MIN_IDLE)
				ke->ke_runq = KSEQ_SELF()->ksq_curr;
			runq_add(ke->ke_runq, ke);
			/* setrunqueue(td); */
		}
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue.  So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_SA)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_runq == NULL)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_interact_update(kg);
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	child->ke_ticks = ke->ke_ticks;
	child->ke_ltick = ke->ke_ltick;
	child->ke_ftick = ke->ke_ftick;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
	/* XXX Need something better here */

	child->kg_slptime = kg->kg_slptime / SCHED_SLP_RUN_THROTTLE;
	child->kg_runtime = kg->kg_runtime / SCHED_SLP_RUN_THROTTLE;
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* kg->kg_slptime += child->kg_slptime; */
	kg->kg_runtime += child->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct thread *td;
#if 0
	struct kse *nke;
#endif

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}
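	/*
	 * Illustrative example: with hz = 1000 and the common stathz of 128,
	 * realstathz is 128 and tickincr is 1000 / 128 = 7, so each stat
	 * clock tick observed here charges 7 << 10 to kg_runtime below,
	 * keeping the interactivity history in the same hz-based units as
	 * the sleep time added in sched_wakeup().
	 */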

	td = ke->ke_thread;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * Check for a higher priority task on the run queue.  This can happen
	 * on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
#if 0
	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq, 0)) != NULL) {
		if (sched_strict &&
		    nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
		else if (nke->ke_thread->td_priority <
		    td->td_priority SCHED_PRIO_SLOP)

		if (nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
	}
#endif
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		goto out;
#ifdef SMP
	/*
	 * For SMP we may steal other processors' KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load > kseq->ksq_cpus)
				goto out;
		}
	}
#endif
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;
#if 0
	struct kseq *kseq;
	struct kse *ke;
#endif

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		/*
		 * This optimization is temporarily disabled because it
		 * breaks priority propagation.
		 */
#if 0
		kseq = KSEQ_SELF();
		if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
#ifdef SMP
		    kseq->ksq_load > kseq->ksq_cpus &&
#else
		    kseq->ksq_load > 1 &&
#endif
		    (ke = kseq_choose(kseq, 0)) != NULL &&
		    ke->ke_thread->td_priority < td->td_priority)
#endif
			curthread->td_flags |= TDF_NEEDRESCHED;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq, 0);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		kseq_move(kseq, PCPU_GET(cpuid));
		goto retry;
	}
#endif

	return (NULL);
}

void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	kg = ke->ke_ksegrp;

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		/*
		 * This is for priority propagation.
		 */
		if (ke->ke_thread->td_priority > PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rticks per second? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}