sched_ule.c revision 121871
/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121871 2003-11-02 04:10:15Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callout to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu
#define	ke_assign	ke_procq.tqe_next

#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */

struct kg_sched {
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower(better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_NTHRESH		(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
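/*
 * Worked example for the slice macros, assuming hz = 1000: sched_setup()
 * below picks slice_min = hz/100 = 10 and slice_max = hz/7 = 142 ticks, so
 * SCHED_SLICE_RANGE is 133 and SCHED_SLICE_NTHRESH is SCHED_PRI_NHALF - 1,
 * or 19.  A non-interactive kse whose nice value equals the least nice value
 * on its queue (offset 0) receives SCHED_SLICE_NICE(0) = 142 ticks, an
 * offset of 10 receives 142 - (10 * 133) / 19 = 72 ticks, and an offset
 * beyond 19 falls outside the window and receives no slice at all unless
 * its nice value is 0 (see sched_slice()).
 */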
/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV];	/* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;	/* Slices on run queue */
	int		ksq_cpus;	/* Count of CPUs in this kseq. */
	struct kse	*ksq_assigned;	/* KSEs assigned by another CPU. */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
static int kseq_idle;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq	*kseq_idmap[MAXCPU];
#define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(kseq_idmap[(x)])
#else
static struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
static void sched_interact_fork(struct ksegrp *kg);
static void sched_pctcpu_update(struct kse *ke);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
#if 0
static int sched_pickcpu(void);
#endif
static struct kse *runq_steal(struct runq *rq);
static struct kseq *kseq_load_highest(void);
static void kseq_balance(void *arg);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_find(void);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload ITHD: %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME: %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE: %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}
static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}
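/*
 * Worked example for the nice accounting above, assuming the stock
 * PRIO_MIN..PRIO_MAX range of -20..20: SCHED_PRI_NHALF is 20, so a nice
 * value of -20 lands in ksq_nice[0], 0 in ksq_nice[20] and +20 in
 * ksq_nice[40].  ksq_nicemin only needs to be recomputed when the last KSE
 * holding the current minimum leaves the queue, which is why kseq_nice_rem()
 * scans upward from the vacated bin.
 */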
#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * algorithm simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
kseq_balance(void *arg)
{
	struct kseq *kseq;
	int high_load;
	int low_load;
	int high_cpu;
	int low_cpu;
	int move;
	int diff;
	int i;

	high_cpu = 0;
	low_cpu = 0;
	high_load = 0;
	low_load = -1;

	mtx_lock_spin(&sched_lock);
	if (smp_started == 0)
		goto out;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > high_load) {
			high_load = kseq->ksq_load;
			high_cpu = i;
		}
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	kseq = KSEQ_CPU(high_cpu);

	high_load = kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME];
	/*
	 * Nothing to do.
	 */
	if (high_load < kseq->ksq_cpus + 1)
		goto out;

	high_load -= kseq->ksq_cpus;

	if (low_load >= high_load)
		goto out;

	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;

	for (i = 0; i < move; i++)
		kseq_move(kseq, low_cpu);

out:
	mtx_unlock_spin(&sched_lock);
	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

	return;
}

static struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	kseq = KSEQ_CPU(cpu);

	if ((kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME]) > kseq->ksq_cpus)
		return (kseq);

	return (NULL);
}

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kse *ke;

	ke = kseq_steal(from);
	runq_remove(ke->ke_runq, ke);
	ke->ke_state = KES_THREAD;
	kseq_rem(from, ke);

	ke->ke_cpu = cpu;
	sched_add(ke->ke_thread);
}

static int
kseq_find(void)
{
	struct kseq *high;

	if (!smp_started)
		return (0);
	if (kseq_idle & PCPU_GET(cpumask))
		return (0);
	/*
	 * Find the cpu with the highest load and steal one proc.
	 */
	if ((high = kseq_load_highest()) == NULL ||
	    high == KSEQ_SELF()) {
		/*
		 * If we couldn't find one, set ourselves in the
		 * idle map.
		 */
		atomic_set_int(&kseq_idle, PCPU_GET(cpumask));
		return (0);
	}
	/*
	 * Remove this kse from this kseq and runq and then requeue
	 * on the current processor.  We now have a load of one!
	 */
	kseq_move(high, PCPU_GET(cpuid));

	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		ke = kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		ke->ke_flags &= ~KEF_ASSIGNED;
		sched_add(ke->ke_thread);
	}
}
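/*
 * ksq_assigned is a singly linked list threaded through ke_assign.  Remote
 * CPUs push KSEs onto it in kseq_notify() with a compare-and-set loop;
 * kseq_assign() detaches the entire list by atomically swapping the head to
 * NULL and then requeues each detached KSE locally via sched_add().
 */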
static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;

	ke->ke_flags |= KEF_ASSIGNED;

	kseq = KSEQ_CPU(cpu);

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		ke->ke_assign = kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1 << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (PRI_BASE(ke->ke_ksegrp->kg_pri_class) !=
				    PRI_ITHD)
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq)
{
	struct kse *ke;

	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	return (runq_steal(&kseq->ksq_idle));
}
#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}
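/*
 * The two ksq_timeshare[] runqs implement a simple epoch scheme: KSEs that
 * exhaust their slice are reinserted on ksq_next, and once ksq_curr is
 * empty kseq_choose() swaps the two pointers.  This bounds how long an
 * expired timeshare KSE can wait to roughly one pass over the current queue.
 */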
static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
	kseq->ksq_assigned = NULL;
#endif
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	/* init kseqs */
	/* Create the idmap. */
#ifdef ULE_HTT_EXPERIMENTAL
	if (smp_topology == NULL) {
#else
	if (1) {
#endif
		for (i = 0; i < MAXCPU; i++) {
			kseq_setup(&kseq_cpu[i]);
			kseq_idmap[i] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = 1;
		}
	} else {
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			struct cpu_group *cg;

			cg = &smp_topology->ct_group[i];
			kseq_setup(&kseq_cpu[i]);

			for (j = 0; j < MAXCPU; j++)
				if ((cg->cg_mask & (1 << j)) != 0)
					kseq_idmap[j] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = cg->cg_count;
		}
	}
	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
	kseq_balance(NULL);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the ksegs nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}
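/*
 * Worked example, assuming hz = 1000 so SCHED_SLP_RUN_MAX = 5,120,000
 * (five seconds of history kept in 1/1024ths of a tick): a ksegrp with
 * kg_runtime = 4,000,000 and kg_slptime = 2,000,000 has sum = 6,000,000,
 * which is over the maximum but under 6/5 of it, so both values are scaled
 * by 4/5 to 3,200,000 and 1,600,000 (sum 4,800,000).  The run/sleep ratio,
 * and therefore the interactivity score, is preserved by the scaling.
 */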
static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);
}
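/*
 * Worked examples for the score above: a ksegrp that sleeps three times as
 * long as it runs (say kg_slptime = 3,000,000 and kg_runtime = 1,000,000)
 * scores 1,000,000 / (3,000,000 / 50) = 16, comfortably below
 * SCHED_INTERACT_THRESH (30) and therefore interactive.  One that runs
 * three times as long as it sleeps scores 50 + (50 - 50/3) = 84 and is
 * treated as a CPU hog.  Scores of 50 or more are only produced when run
 * time exceeds sleep time.
 */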
/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}
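/*
 * Example of the rescaling above, assuming hz = 1000 so SCHED_CPU_TICKS is
 * 10,000: if a KSE accumulated 3,000 run ticks over a 15,000 tick window
 * (ticks - ke_ftick), the fixed point math reduces ke_ticks to roughly
 * 3,000 * 10,000 / 15,000 = 2,000, i.e. the same 20% utilization expressed
 * over the standard ten second window that sched_pctcpu() reports against.
 */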
#if 0
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = 0;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_ULE, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (ke && (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke);
		}
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td)
{
	struct thread *newtd;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		if (td->td_proc->p_flag & P_SA) {
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
			setrunqueue(td);
		} else {
			/*
			 * This queue is always correct except for idle threads
			 * which have a higher priority due to priority
			 * propagation.
			 */
			if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) {
				if (td->td_priority < PRI_MIN_IDLE)
					ke->ke_runq = KSEQ_SELF()->ksq_curr;
				else
					ke->ke_runq = &KSEQ_SELF()->ksq_idle;
			}
			runq_add(ke->ke_runq, ke);
			/* setrunqueue(td); */
		}
	} else {
		if (ke->ke_runq)
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
		/*
		 * We will not be on the run queue.  So we must be
		 * sleeping or similar.
		 */
		if (td->td_proc->p_flag & P_SA)
			kse_reassign(ke);
	}
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_runq == NULL)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	child->ke_ticks = ke->ke_ticks;
	child->ke_ltick = ke->ke_ltick;
	child->ke_ftick = ke->ke_ftick;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* kg->kg_slptime += child->kg_slptime; */
	kg->kg_runtime += child->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}
void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
	kseq = KSEQ_SELF();
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
retry:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_find())
				goto retry;
#endif
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}
#ifdef SMP
	if (kseq_find())
		goto retry;
#endif

	return (NULL);
}

void
sched_add(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	kseq = KSEQ_SELF();
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	class = PRI_BASE(kg->kg_pri_class);
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
#ifdef SMP
		if (ke->ke_cpu != PCPU_GET(cpuid)) {
			kseq_notify(ke, ke->ke_cpu);
			return;
		}
#endif
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
#ifdef SMP
		if (ke->ke_cpu != PCPU_GET(cpuid)) {
			kseq_notify(ke, ke->ke_cpu);
			return;
		}
#endif
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * If there are any idle processors, give them our extra load.
	 */
	if (kseq_idle && class != PRI_ITHD &&
	    (kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME]) >= kseq->ksq_cpus) {
		int cpu;

		/*
		 * Multiple cpus could find this bit simultaneously but the
		 * race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu) {
			cpu--;
			atomic_clear_int(&kseq_idle, 1 << cpu);
			ke->ke_cpu = cpu;
			ke->ke_runq = NULL;
			kseq_notify(ke, cpu);
			return;
		}
	}
	if (class == PRI_TIMESHARE || class == PRI_REALTIME)
		atomic_clear_int(&kseq_idle, PCPU_GET(cpumask));
#endif
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;
	/*
	 * It is safe to just return here because sched_rem() is only ever
	 * used in places where we're immediately going to add the
	 * kse back on again.  In that case it'll be added with the correct
	 * thread and priority when the caller drops the sched_lock.
	 */
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rticks per second? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}
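/*
 * Worked example for the conversion above, assuming hz = realstathz = 1000:
 * a KSE with ke_ticks = 2,000 over the ten second SCHED_CPU_TIME window
 * averages rtick = 200 stathz ticks per second, so
 * pctcpu = (FSCALE * ((FSCALE * 200) / 1000)) >> FSHIFT = 0.2 * FSCALE,
 * which ps(1) reports as 20% CPU.
 */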
int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}