sched_ule.c revision 123684
1/*- 2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 123684 2003-12-20 12:54:35Z jeff $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/kernel.h> 33#include <sys/ktr.h> 34#include <sys/lock.h> 35#include <sys/mutex.h> 36#include <sys/proc.h> 37#include <sys/resource.h> 38#include <sys/resourcevar.h> 39#include <sys/sched.h> 40#include <sys/smp.h> 41#include <sys/sx.h> 42#include <sys/sysctl.h> 43#include <sys/sysproto.h> 44#include <sys/vmmeter.h> 45#ifdef DDB 46#include <ddb/ddb.h> 47#endif 48#ifdef KTRACE 49#include <sys/uio.h> 50#include <sys/ktrace.h> 51#endif 52 53#include <machine/cpu.h> 54#include <machine/smp.h> 55 56#define KTR_ULE KTR_NFS 57 58/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ 59/* XXX This is bogus compatability crap for ps */ 60static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 61SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 62 63static void sched_setup(void *dummy); 64SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) 65 66static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED"); 67 68static int sched_strict; 69SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, ""); 70 71static int slice_min = 1; 72SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, ""); 73 74static int slice_max = 10; 75SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, ""); 76 77int realstathz; 78int tickincr = 1; 79 80#ifdef SMP 81/* Callouts to handle load balancing SMP systems. */ 82static struct callout kseq_lb_callout; 83static struct callout kseq_group_callout; 84#endif 85 86/* 87 * These datastructures are allocated within their parent datastructure but 88 * are scheduler specific. 89 */ 90 91struct ke_sched { 92 int ske_slice; 93 struct runq *ske_runq; 94 /* The following variables are only used for pctcpu calculation */ 95 int ske_ltick; /* Last tick that we were running on */ 96 int ske_ftick; /* First tick that we were running on */ 97 int ske_ticks; /* Tick count */ 98 /* CPU that we have affinity for. */ 99 u_char ske_cpu; 100}; 101#define ke_slice ke_sched->ske_slice 102#define ke_runq ke_sched->ske_runq 103#define ke_ltick ke_sched->ske_ltick 104#define ke_ftick ke_sched->ske_ftick 105#define ke_ticks ke_sched->ske_ticks 106#define ke_cpu ke_sched->ske_cpu 107#define ke_assign ke_procq.tqe_next 108 109#define KEF_ASSIGNED KEF_SCHED0 /* KSE is being migrated. */ 110#define KEF_BOUND KEF_SCHED1 /* KSE can not migrate. */ 111 112struct kg_sched { 113 int skg_slptime; /* Number of ticks we vol. slept */ 114 int skg_runtime; /* Number of ticks we were running */ 115}; 116#define kg_slptime kg_sched->skg_slptime 117#define kg_runtime kg_sched->skg_runtime 118 119struct td_sched { 120 int std_slptime; 121}; 122#define td_slptime td_sched->std_slptime 123 124struct td_sched td_sched; 125struct ke_sched ke_sched; 126struct kg_sched kg_sched; 127 128struct ke_sched *kse0_sched = &ke_sched; 129struct kg_sched *ksegrp0_sched = &kg_sched; 130struct p_sched *proc0_sched = NULL; 131struct td_sched *thread0_sched = &td_sched; 132 133/* 134 * The priority is primarily determined by the interactivity score. Thus, we 135 * give lower(better) priorities to kse groups that use less CPU. The nice 136 * value is then directly added to this to allow nice to have some effect 137 * on latency. 138 * 139 * PRI_RANGE: Total priority range for timeshare threads. 140 * PRI_NRESV: Number of nice values. 141 * PRI_BASE: The start of the dynamic range. 142 */ 143#define SCHED_PRI_RANGE (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1) 144#define SCHED_PRI_NRESV ((PRIO_MAX - PRIO_MIN) + 1) 145#define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) 146#define SCHED_PRI_BASE (PRI_MIN_TIMESHARE) 147#define SCHED_PRI_INTERACT(score) \ 148 ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX) 149 150/* 151 * These determine the interactivity of a process. 152 * 153 * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate 154 * before throttling back. 155 * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time. 156 * INTERACT_MAX: Maximum interactivity value. Smaller is better. 157 * INTERACT_THRESH: Threshhold for placement on the current runq. 158 */ 159#define SCHED_SLP_RUN_MAX ((hz * 5) << 10) 160#define SCHED_SLP_RUN_FORK ((hz / 2) << 10) 161#define SCHED_INTERACT_MAX (100) 162#define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2) 163#define SCHED_INTERACT_THRESH (30) 164 165/* 166 * These parameters and macros determine the size of the time slice that is 167 * granted to each thread. 168 * 169 * SLICE_MIN: Minimum time slice granted, in units of ticks. 170 * SLICE_MAX: Maximum time slice granted. 171 * SLICE_RANGE: Range of available time slices scaled by hz. 172 * SLICE_SCALE: The number slices granted per val in the range of [0, max]. 173 * SLICE_NICE: Determine the amount of slice granted to a scaled nice. 174 * SLICE_NTHRESH: The nice cutoff point for slice assignment. 175 */ 176#define SCHED_SLICE_MIN (slice_min) 177#define SCHED_SLICE_MAX (slice_max) 178#define SCHED_SLICE_INTERACTIVE (slice_min * 4) 179#define SCHED_SLICE_NTHRESH (SCHED_PRI_NHALF - 1) 180#define SCHED_SLICE_RANGE (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1) 181#define SCHED_SLICE_SCALE(val, max) (((val) * SCHED_SLICE_RANGE) / (max)) 182#define SCHED_SLICE_NICE(nice) \ 183 (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH)) 184 185/* 186 * This macro determines whether or not the kse belongs on the current or 187 * next run queue. 188 */ 189#define SCHED_INTERACTIVE(kg) \ 190 (sched_interact_score(kg) < SCHED_INTERACT_THRESH) 191#define SCHED_CURR(kg, ke) \ 192 (ke->ke_thread->td_priority != kg->kg_user_pri || \ 193 SCHED_INTERACTIVE(kg)) 194 195/* 196 * Cpu percentage computation macros and defines. 197 * 198 * SCHED_CPU_TIME: Number of seconds to average the cpu usage across. 199 * SCHED_CPU_TICKS: Number of hz ticks to average the cpu usage across. 200 */ 201 202#define SCHED_CPU_TIME 10 203#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME) 204 205/* 206 * kseq - per processor runqs and statistics. 207 */ 208struct kseq { 209 struct runq ksq_idle; /* Queue of IDLE threads. */ 210 struct runq ksq_timeshare[2]; /* Run queues for !IDLE. */ 211 struct runq *ksq_next; /* Next timeshare queue. */ 212 struct runq *ksq_curr; /* Current queue. */ 213 int ksq_load_timeshare; /* Load for timeshare. */ 214 int ksq_load; /* Aggregate load. */ 215 short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */ 216 short ksq_nicemin; /* Least nice. */ 217#ifdef SMP 218 int ksq_transferable; 219 LIST_ENTRY(kseq) ksq_siblings; /* Next in kseq group. */ 220 struct kseq_group *ksq_group; /* Our processor group. */ 221 volatile struct kse *ksq_assigned; /* assigned by another CPU. */ 222#endif 223}; 224 225#ifdef SMP 226/* 227 * kseq groups are groups of processors which can cheaply share threads. When 228 * one processor in the group goes idle it will check the runqs of the other 229 * processors in its group prior to halting and waiting for an interrupt. 230 * These groups are suitable for SMT (Symetric Multi-Threading) and not NUMA. 231 * In a numa environment we'd want an idle bitmap per group and a two tiered 232 * load balancer. 233 */ 234struct kseq_group { 235 int ksg_cpus; /* Count of CPUs in this kseq group. */ 236 int ksg_cpumask; /* Mask of cpus in this group. */ 237 int ksg_idlemask; /* Idle cpus in this group. */ 238 int ksg_mask; /* Bit mask for first cpu. */ 239 int ksg_load; /* Total load of this group. */ 240 int ksg_transferable; /* Transferable load of this group. */ 241 LIST_HEAD(, kseq) ksg_members; /* Linked list of all members. */ 242}; 243#endif 244 245/* 246 * One kse queue per processor. 247 */ 248#ifdef SMP 249static int kseq_idle; 250static int ksg_maxid; 251static struct kseq kseq_cpu[MAXCPU]; 252static struct kseq_group kseq_groups[MAXCPU]; 253#define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)]) 254#define KSEQ_CPU(x) (&kseq_cpu[(x)]) 255#define KSEQ_ID(x) ((x) - kseq_cpu) 256#define KSEQ_GROUP(x) (&kseq_groups[(x)]) 257#else /* !SMP */ 258static struct kseq kseq_cpu; 259#define KSEQ_SELF() (&kseq_cpu) 260#define KSEQ_CPU(x) (&kseq_cpu) 261#endif 262 263static void sched_slice(struct kse *ke); 264static void sched_priority(struct ksegrp *kg); 265static int sched_interact_score(struct ksegrp *kg); 266static void sched_interact_update(struct ksegrp *kg); 267static void sched_interact_fork(struct ksegrp *kg); 268static void sched_pctcpu_update(struct kse *ke); 269 270/* Operations on per processor queues */ 271static struct kse * kseq_choose(struct kseq *kseq); 272static void kseq_setup(struct kseq *kseq); 273static void kseq_load_add(struct kseq *kseq, struct kse *ke); 274static void kseq_load_rem(struct kseq *kseq, struct kse *ke); 275static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke); 276static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke); 277static void kseq_nice_add(struct kseq *kseq, int nice); 278static void kseq_nice_rem(struct kseq *kseq, int nice); 279void kseq_print(int cpu); 280#ifdef SMP 281static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class); 282static struct kse *runq_steal(struct runq *rq); 283static void sched_balance(void *arg); 284static void sched_balance_group(struct kseq_group *ksg); 285static void sched_balance_pair(struct kseq *high, struct kseq *low); 286static void kseq_move(struct kseq *from, int cpu); 287static int kseq_idled(struct kseq *kseq); 288static void kseq_notify(struct kse *ke, int cpu); 289static void kseq_assign(struct kseq *); 290static struct kse *kseq_steal(struct kseq *kseq, int stealidle); 291#define KSE_CAN_MIGRATE(ke, class) \ 292 ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 && \ 293 ((ke)->ke_flags & KEF_BOUND) == 0) 294#endif 295 296void 297kseq_print(int cpu) 298{ 299 struct kseq *kseq; 300 int i; 301 302 kseq = KSEQ_CPU(cpu); 303 304 printf("kseq:\n"); 305 printf("\tload: %d\n", kseq->ksq_load); 306 printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare); 307#ifdef SMP 308 printf("\tload transferable: %d\n", kseq->ksq_transferable); 309#endif 310 printf("\tnicemin:\t%d\n", kseq->ksq_nicemin); 311 printf("\tnice counts:\n"); 312 for (i = 0; i < SCHED_PRI_NRESV; i++) 313 if (kseq->ksq_nice[i]) 314 printf("\t\t%d = %d\n", 315 i - SCHED_PRI_NHALF, kseq->ksq_nice[i]); 316} 317 318static __inline void 319kseq_runq_add(struct kseq *kseq, struct kse *ke) 320{ 321#ifdef SMP 322 if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) { 323 kseq->ksq_transferable++; 324 kseq->ksq_group->ksg_transferable++; 325 } 326#endif 327 runq_add(ke->ke_runq, ke); 328} 329 330static __inline void 331kseq_runq_rem(struct kseq *kseq, struct kse *ke) 332{ 333#ifdef SMP 334 if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) { 335 kseq->ksq_transferable--; 336 kseq->ksq_group->ksg_transferable--; 337 } 338#endif 339 runq_remove(ke->ke_runq, ke); 340} 341 342static void 343kseq_load_add(struct kseq *kseq, struct kse *ke) 344{ 345 int class; 346 mtx_assert(&sched_lock, MA_OWNED); 347 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 348 if (class == PRI_TIMESHARE) 349 kseq->ksq_load_timeshare++; 350 kseq->ksq_load++; 351#ifdef SMP 352 if (class != PRI_ITHD) 353 kseq->ksq_group->ksg_load++; 354#endif 355 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 356 CTR6(KTR_ULE, 357 "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))", 358 ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority, 359 ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin); 360 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 361 kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice); 362} 363 364static void 365kseq_load_rem(struct kseq *kseq, struct kse *ke) 366{ 367 int class; 368 mtx_assert(&sched_lock, MA_OWNED); 369 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 370 if (class == PRI_TIMESHARE) 371 kseq->ksq_load_timeshare--; 372#ifdef SMP 373 if (class != PRI_ITHD) 374 kseq->ksq_group->ksg_load--; 375#endif 376 kseq->ksq_load--; 377 ke->ke_runq = NULL; 378 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 379 kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice); 380} 381 382static void 383kseq_nice_add(struct kseq *kseq, int nice) 384{ 385 mtx_assert(&sched_lock, MA_OWNED); 386 /* Normalize to zero. */ 387 kseq->ksq_nice[nice + SCHED_PRI_NHALF]++; 388 if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1) 389 kseq->ksq_nicemin = nice; 390} 391 392static void 393kseq_nice_rem(struct kseq *kseq, int nice) 394{ 395 int n; 396 397 mtx_assert(&sched_lock, MA_OWNED); 398 /* Normalize to zero. */ 399 n = nice + SCHED_PRI_NHALF; 400 kseq->ksq_nice[n]--; 401 KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count.")); 402 403 /* 404 * If this wasn't the smallest nice value or there are more in 405 * this bucket we can just return. Otherwise we have to recalculate 406 * the smallest nice. 407 */ 408 if (nice != kseq->ksq_nicemin || 409 kseq->ksq_nice[n] != 0 || 410 kseq->ksq_load_timeshare == 0) 411 return; 412 413 for (; n < SCHED_PRI_NRESV; n++) 414 if (kseq->ksq_nice[n]) { 415 kseq->ksq_nicemin = n - SCHED_PRI_NHALF; 416 return; 417 } 418} 419 420#ifdef SMP 421/* 422 * sched_balance is a simple CPU load balancing algorithm. It operates by 423 * finding the least loaded and most loaded cpu and equalizing their load 424 * by migrating some processes. 425 * 426 * Dealing only with two CPUs at a time has two advantages. Firstly, most 427 * installations will only have 2 cpus. Secondly, load balancing too much at 428 * once can have an unpleasant effect on the system. The scheduler rarely has 429 * enough information to make perfect decisions. So this algorithm chooses 430 * algorithm simplicity and more gradual effects on load in larger systems. 431 * 432 * It could be improved by considering the priorities and slices assigned to 433 * each task prior to balancing them. There are many pathological cases with 434 * any approach and so the semi random algorithm below may work as well as any. 435 * 436 */ 437static void 438sched_balance(void *arg) 439{ 440 struct kseq_group *high; 441 struct kseq_group *low; 442 struct kseq_group *ksg; 443 int timo; 444 int cnt; 445 int i; 446 447 mtx_lock_spin(&sched_lock); 448 if (smp_started == 0) 449 goto out; 450 low = high = NULL; 451 i = random() % (ksg_maxid + 1); 452 for (cnt = 0; cnt <= ksg_maxid; cnt++) { 453 ksg = KSEQ_GROUP(i); 454 /* 455 * Find the CPU with the highest load that has some 456 * threads to transfer. 457 */ 458 if ((high == NULL || ksg->ksg_load > high->ksg_load) 459 && ksg->ksg_transferable) 460 high = ksg; 461 if (low == NULL || ksg->ksg_load < low->ksg_load) 462 low = ksg; 463 if (++i > ksg_maxid) 464 i = 0; 465 } 466 if (low != NULL && high != NULL && high != low) 467 sched_balance_pair(LIST_FIRST(&high->ksg_members), 468 LIST_FIRST(&low->ksg_members)); 469out: 470 mtx_unlock_spin(&sched_lock); 471 timo = random() % (hz * 2); 472 callout_reset(&kseq_lb_callout, timo, sched_balance, NULL); 473} 474 475static void 476sched_balance_groups(void *arg) 477{ 478 int timo; 479 int i; 480 481 mtx_lock_spin(&sched_lock); 482 if (smp_started) 483 for (i = 0; i <= ksg_maxid; i++) 484 sched_balance_group(KSEQ_GROUP(i)); 485 mtx_unlock_spin(&sched_lock); 486 timo = random() % (hz * 2); 487 callout_reset(&kseq_group_callout, timo, sched_balance_groups, NULL); 488} 489 490static void 491sched_balance_group(struct kseq_group *ksg) 492{ 493 struct kseq *kseq; 494 struct kseq *high; 495 struct kseq *low; 496 int load; 497 498 if (ksg->ksg_transferable == 0) 499 return; 500 low = NULL; 501 high = NULL; 502 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { 503 load = kseq->ksq_load; 504 if (kseq == KSEQ_CPU(0)) 505 load--; 506 if (high == NULL || load > high->ksq_load) 507 high = kseq; 508 if (low == NULL || load < low->ksq_load) 509 low = kseq; 510 } 511 if (high != NULL && low != NULL && high != low) 512 sched_balance_pair(high, low); 513} 514 515static void 516sched_balance_pair(struct kseq *high, struct kseq *low) 517{ 518 int transferable; 519 int high_load; 520 int low_load; 521 int move; 522 int diff; 523 int i; 524 525 /* 526 * If we're transfering within a group we have to use this specific 527 * kseq's transferable count, otherwise we can steal from other members 528 * of the group. 529 */ 530 if (high->ksq_group == low->ksq_group) { 531 transferable = high->ksq_transferable; 532 high_load = high->ksq_load; 533 low_load = low->ksq_load; 534 /* 535 * XXX If we encounter cpu 0 we must remember to reduce it's 536 * load by 1 to reflect the swi that is running the callout. 537 * At some point we should really fix load balancing of the 538 * swi and then this wont matter. 539 */ 540 if (high == KSEQ_CPU(0)) 541 high_load--; 542 if (low == KSEQ_CPU(0)) 543 low_load--; 544 } else { 545 transferable = high->ksq_group->ksg_transferable; 546 high_load = high->ksq_group->ksg_load; 547 low_load = low->ksq_group->ksg_load; 548 } 549 if (transferable == 0) 550 return; 551 /* 552 * Determine what the imbalance is and then adjust that to how many 553 * kses we actually have to give up (transferable). 554 */ 555 diff = high_load - low_load; 556 move = diff / 2; 557 if (diff & 0x1) 558 move++; 559 move = min(move, transferable); 560 for (i = 0; i < move; i++) 561 kseq_move(high, KSEQ_ID(low)); 562 return; 563} 564 565static void 566kseq_move(struct kseq *from, int cpu) 567{ 568 struct kseq *kseq; 569 struct kseq *to; 570 struct kse *ke; 571 572 kseq = from; 573 to = KSEQ_CPU(cpu); 574 ke = kseq_steal(kseq, 1); 575 if (ke == NULL) { 576 struct kseq_group *ksg; 577 578 ksg = kseq->ksq_group; 579 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { 580 if (kseq == from || kseq->ksq_transferable == 0) 581 continue; 582 ke = kseq_steal(kseq, 1); 583 break; 584 } 585 if (ke == NULL) 586 panic("kseq_move: No KSEs available with a " 587 "transferable count of %d\n", 588 ksg->ksg_transferable); 589 } 590 if (kseq == to) 591 return; 592 ke->ke_state = KES_THREAD; 593 kseq_runq_rem(kseq, ke); 594 kseq_load_rem(kseq, ke); 595 kseq_notify(ke, cpu); 596} 597 598static int 599kseq_idled(struct kseq *kseq) 600{ 601 struct kseq_group *ksg; 602 struct kseq *steal; 603 struct kse *ke; 604 605 ksg = kseq->ksq_group; 606 /* 607 * If we're in a cpu group, try and steal kses from another cpu in 608 * the group before idling. 609 */ 610 if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) { 611 LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) { 612 if (steal == kseq || steal->ksq_transferable == 0) 613 continue; 614 ke = kseq_steal(steal, 0); 615 if (ke == NULL) 616 continue; 617 ke->ke_state = KES_THREAD; 618 kseq_runq_rem(steal, ke); 619 kseq_load_rem(steal, ke); 620 ke->ke_cpu = PCPU_GET(cpuid); 621 sched_add(ke->ke_thread); 622 return (0); 623 } 624 } 625 /* 626 * We only set the idled bit when all of the cpus in the group are 627 * idle. Otherwise we could get into a situation where a KSE bounces 628 * back and forth between two idle cores on seperate physical CPUs. 629 */ 630 ksg->ksg_idlemask |= PCPU_GET(cpumask); 631 if (ksg->ksg_idlemask != ksg->ksg_cpumask) 632 return (1); 633 atomic_set_int(&kseq_idle, ksg->ksg_mask); 634 return (1); 635} 636 637static void 638kseq_assign(struct kseq *kseq) 639{ 640 struct kse *nke; 641 struct kse *ke; 642 643 do { 644 (volatile struct kse *)ke = kseq->ksq_assigned; 645 } while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL)); 646 for (; ke != NULL; ke = nke) { 647 nke = ke->ke_assign; 648 ke->ke_flags &= ~KEF_ASSIGNED; 649 sched_add(ke->ke_thread); 650 } 651} 652 653static void 654kseq_notify(struct kse *ke, int cpu) 655{ 656 struct kseq *kseq; 657 struct thread *td; 658 struct pcpu *pcpu; 659 660 ke->ke_cpu = cpu; 661 ke->ke_flags |= KEF_ASSIGNED; 662 663 kseq = KSEQ_CPU(cpu); 664 665 /* 666 * Place a KSE on another cpu's queue and force a resched. 667 */ 668 do { 669 (volatile struct kse *)ke->ke_assign = kseq->ksq_assigned; 670 } while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke)); 671 pcpu = pcpu_find(cpu); 672 td = pcpu->pc_curthread; 673 if (ke->ke_thread->td_priority < td->td_priority || 674 td == pcpu->pc_idlethread) { 675 td->td_flags |= TDF_NEEDRESCHED; 676 ipi_selected(1 << cpu, IPI_AST); 677 } 678} 679 680static struct kse * 681runq_steal(struct runq *rq) 682{ 683 struct rqhead *rqh; 684 struct rqbits *rqb; 685 struct kse *ke; 686 int word; 687 int bit; 688 689 mtx_assert(&sched_lock, MA_OWNED); 690 rqb = &rq->rq_status; 691 for (word = 0; word < RQB_LEN; word++) { 692 if (rqb->rqb_bits[word] == 0) 693 continue; 694 for (bit = 0; bit < RQB_BPW; bit++) { 695 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 696 continue; 697 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 698 TAILQ_FOREACH(ke, rqh, ke_procq) { 699 if (KSE_CAN_MIGRATE(ke, 700 PRI_BASE(ke->ke_ksegrp->kg_pri_class))) 701 return (ke); 702 } 703 } 704 } 705 return (NULL); 706} 707 708static struct kse * 709kseq_steal(struct kseq *kseq, int stealidle) 710{ 711 struct kse *ke; 712 713 /* 714 * Steal from next first to try to get a non-interactive task that 715 * may not have run for a while. 716 */ 717 if ((ke = runq_steal(kseq->ksq_next)) != NULL) 718 return (ke); 719 if ((ke = runq_steal(kseq->ksq_curr)) != NULL) 720 return (ke); 721 if (stealidle) 722 return (runq_steal(&kseq->ksq_idle)); 723 return (NULL); 724} 725 726int 727kseq_transfer(struct kseq *kseq, struct kse *ke, int class) 728{ 729 struct kseq_group *ksg; 730 int cpu; 731 732 cpu = 0; 733 ksg = kseq->ksq_group; 734 735 /* 736 * XXX This ksg_transferable might work better if we were checking 737 * against a global group load. As it is now, this prevents us from 738 * transfering a thread from a group that is potentially bogged down 739 * with non transferable load. 740 */ 741 if (ksg->ksg_transferable > ksg->ksg_cpus && kseq_idle) { 742 /* 743 * Multiple cpus could find this bit simultaneously 744 * but the race shouldn't be terrible. 745 */ 746 cpu = ffs(kseq_idle); 747 if (cpu) 748 atomic_clear_int(&kseq_idle, 1 << (cpu - 1)); 749 } 750 /* 751 * If another cpu in this group has idled, assign a thread over 752 * to them after checking to see if there are idled groups. 753 */ 754 if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) { 755 cpu = ffs(ksg->ksg_idlemask); 756 if (cpu) 757 ksg->ksg_idlemask &= ~(1 << (cpu - 1)); 758 } 759 /* 760 * Now that we've found an idle CPU, migrate the thread. 761 */ 762 if (cpu) { 763 cpu--; 764 ke->ke_runq = NULL; 765 kseq_notify(ke, cpu); 766 return (1); 767 } 768 return (0); 769} 770 771#endif /* SMP */ 772 773/* 774 * Pick the highest priority task we have and return it. 775 */ 776 777static struct kse * 778kseq_choose(struct kseq *kseq) 779{ 780 struct kse *ke; 781 struct runq *swap; 782 783 mtx_assert(&sched_lock, MA_OWNED); 784 swap = NULL; 785 786 for (;;) { 787 ke = runq_choose(kseq->ksq_curr); 788 if (ke == NULL) { 789 /* 790 * We already swaped once and didn't get anywhere. 791 */ 792 if (swap) 793 break; 794 swap = kseq->ksq_curr; 795 kseq->ksq_curr = kseq->ksq_next; 796 kseq->ksq_next = swap; 797 continue; 798 } 799 /* 800 * If we encounter a slice of 0 the kse is in a 801 * TIMESHARE kse group and its nice was too far out 802 * of the range that receives slices. 803 */ 804 if (ke->ke_slice == 0) { 805 runq_remove(ke->ke_runq, ke); 806 sched_slice(ke); 807 ke->ke_runq = kseq->ksq_next; 808 runq_add(ke->ke_runq, ke); 809 continue; 810 } 811 return (ke); 812 } 813 814 return (runq_choose(&kseq->ksq_idle)); 815} 816 817static void 818kseq_setup(struct kseq *kseq) 819{ 820 runq_init(&kseq->ksq_timeshare[0]); 821 runq_init(&kseq->ksq_timeshare[1]); 822 runq_init(&kseq->ksq_idle); 823 kseq->ksq_curr = &kseq->ksq_timeshare[0]; 824 kseq->ksq_next = &kseq->ksq_timeshare[1]; 825 kseq->ksq_load = 0; 826 kseq->ksq_load_timeshare = 0; 827} 828 829static void 830sched_setup(void *dummy) 831{ 832#ifdef SMP 833 int balance_groups; 834 int i; 835#endif 836 837 slice_min = (hz/100); /* 10ms */ 838 slice_max = (hz/7); /* ~140ms */ 839 840#ifdef SMP 841 balance_groups = 0; 842 /* 843 * Initialize the kseqs. 844 */ 845 for (i = 0; i < MAXCPU; i++) { 846 struct kseq *ksq; 847 848 ksq = &kseq_cpu[i]; 849 ksq->ksq_assigned = NULL; 850 kseq_setup(&kseq_cpu[i]); 851 } 852 if (smp_topology == NULL) { 853 struct kseq_group *ksg; 854 struct kseq *ksq; 855 856 for (i = 0; i < MAXCPU; i++) { 857 ksq = &kseq_cpu[i]; 858 ksg = &kseq_groups[i]; 859 /* 860 * Setup a kse group with one member. 861 */ 862 ksq->ksq_transferable = 0; 863 ksq->ksq_group = ksg; 864 ksg->ksg_cpus = 1; 865 ksg->ksg_idlemask = 0; 866 ksg->ksg_cpumask = ksg->ksg_mask = 1 << i; 867 ksg->ksg_load = 0; 868 ksg->ksg_transferable = 0; 869 LIST_INIT(&ksg->ksg_members); 870 LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings); 871 } 872 } else { 873 struct kseq_group *ksg; 874 struct cpu_group *cg; 875 int j; 876 877 for (i = 0; i < smp_topology->ct_count; i++) { 878 cg = &smp_topology->ct_group[i]; 879 ksg = &kseq_groups[i]; 880 /* 881 * Initialize the group. 882 */ 883 ksg->ksg_idlemask = 0; 884 ksg->ksg_load = 0; 885 ksg->ksg_transferable = 0; 886 ksg->ksg_cpus = cg->cg_count; 887 ksg->ksg_cpumask = cg->cg_mask; 888 LIST_INIT(&ksg->ksg_members); 889 /* 890 * Find all of the group members and add them. 891 */ 892 for (j = 0; j < MAXCPU; j++) { 893 if ((cg->cg_mask & (1 << j)) != 0) { 894 if (ksg->ksg_mask == 0) 895 ksg->ksg_mask = 1 << j; 896 kseq_cpu[j].ksq_transferable = 0; 897 kseq_cpu[j].ksq_group = ksg; 898 LIST_INSERT_HEAD(&ksg->ksg_members, 899 &kseq_cpu[j], ksq_siblings); 900 } 901 } 902 if (ksg->ksg_cpus > 1) 903 balance_groups = 1; 904 } 905 ksg_maxid = smp_topology->ct_count - 1; 906 } 907 callout_init(&kseq_lb_callout, CALLOUT_MPSAFE); 908 callout_init(&kseq_group_callout, CALLOUT_MPSAFE); 909 sched_balance(NULL); 910 /* 911 * Stagger the group and global load balancer so they do not 912 * interfere with each other. 913 */ 914 if (balance_groups) 915 callout_reset(&kseq_group_callout, hz / 2, 916 sched_balance_groups, NULL); 917#else 918 kseq_setup(KSEQ_SELF()); 919#endif 920 mtx_lock_spin(&sched_lock); 921 kseq_load_add(KSEQ_SELF(), &kse0); 922 mtx_unlock_spin(&sched_lock); 923} 924 925/* 926 * Scale the scheduling priority according to the "interactivity" of this 927 * process. 928 */ 929static void 930sched_priority(struct ksegrp *kg) 931{ 932 int pri; 933 934 if (kg->kg_pri_class != PRI_TIMESHARE) 935 return; 936 937 pri = SCHED_PRI_INTERACT(sched_interact_score(kg)); 938 pri += SCHED_PRI_BASE; 939 pri += kg->kg_nice; 940 941 if (pri > PRI_MAX_TIMESHARE) 942 pri = PRI_MAX_TIMESHARE; 943 else if (pri < PRI_MIN_TIMESHARE) 944 pri = PRI_MIN_TIMESHARE; 945 946 kg->kg_user_pri = pri; 947 948 return; 949} 950 951/* 952 * Calculate a time slice based on the properties of the kseg and the runq 953 * that we're on. This is only for PRI_TIMESHARE ksegrps. 954 */ 955static void 956sched_slice(struct kse *ke) 957{ 958 struct kseq *kseq; 959 struct ksegrp *kg; 960 961 kg = ke->ke_ksegrp; 962 kseq = KSEQ_CPU(ke->ke_cpu); 963 964 /* 965 * Rationale: 966 * KSEs in interactive ksegs get the minimum slice so that we 967 * quickly notice if it abuses its advantage. 968 * 969 * KSEs in non-interactive ksegs are assigned a slice that is 970 * based on the ksegs nice value relative to the least nice kseg 971 * on the run queue for this cpu. 972 * 973 * If the KSE is less nice than all others it gets the maximum 974 * slice and other KSEs will adjust their slice relative to 975 * this when they first expire. 976 * 977 * There is 20 point window that starts relative to the least 978 * nice kse on the run queue. Slice size is determined by 979 * the kse distance from the last nice ksegrp. 980 * 981 * If the kse is outside of the window it will get no slice 982 * and will be reevaluated each time it is selected on the 983 * run queue. The exception to this is nice 0 ksegs when 984 * a nice -20 is running. They are always granted a minimum 985 * slice. 986 */ 987 if (!SCHED_INTERACTIVE(kg)) { 988 int nice; 989 990 nice = kg->kg_nice + (0 - kseq->ksq_nicemin); 991 if (kseq->ksq_load_timeshare == 0 || 992 kg->kg_nice < kseq->ksq_nicemin) 993 ke->ke_slice = SCHED_SLICE_MAX; 994 else if (nice <= SCHED_SLICE_NTHRESH) 995 ke->ke_slice = SCHED_SLICE_NICE(nice); 996 else if (kg->kg_nice == 0) 997 ke->ke_slice = SCHED_SLICE_MIN; 998 else 999 ke->ke_slice = 0; 1000 } else 1001 ke->ke_slice = SCHED_SLICE_INTERACTIVE; 1002 1003 CTR6(KTR_ULE, 1004 "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)", 1005 ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin, 1006 kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg)); 1007 1008 return; 1009} 1010 1011/* 1012 * This routine enforces a maximum limit on the amount of scheduling history 1013 * kept. It is called after either the slptime or runtime is adjusted. 1014 * This routine will not operate correctly when slp or run times have been 1015 * adjusted to more than double their maximum. 1016 */ 1017static void 1018sched_interact_update(struct ksegrp *kg) 1019{ 1020 int sum; 1021 1022 sum = kg->kg_runtime + kg->kg_slptime; 1023 if (sum < SCHED_SLP_RUN_MAX) 1024 return; 1025 /* 1026 * If we have exceeded by more than 1/5th then the algorithm below 1027 * will not bring us back into range. Dividing by two here forces 1028 * us into the range of [3/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1029 */ 1030 if (sum > (SCHED_INTERACT_MAX / 5) * 6) { 1031 kg->kg_runtime /= 2; 1032 kg->kg_slptime /= 2; 1033 return; 1034 } 1035 kg->kg_runtime = (kg->kg_runtime / 5) * 4; 1036 kg->kg_slptime = (kg->kg_slptime / 5) * 4; 1037} 1038 1039static void 1040sched_interact_fork(struct ksegrp *kg) 1041{ 1042 int ratio; 1043 int sum; 1044 1045 sum = kg->kg_runtime + kg->kg_slptime; 1046 if (sum > SCHED_SLP_RUN_FORK) { 1047 ratio = sum / SCHED_SLP_RUN_FORK; 1048 kg->kg_runtime /= ratio; 1049 kg->kg_slptime /= ratio; 1050 } 1051} 1052 1053static int 1054sched_interact_score(struct ksegrp *kg) 1055{ 1056 int div; 1057 1058 if (kg->kg_runtime > kg->kg_slptime) { 1059 div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF); 1060 return (SCHED_INTERACT_HALF + 1061 (SCHED_INTERACT_HALF - (kg->kg_slptime / div))); 1062 } if (kg->kg_slptime > kg->kg_runtime) { 1063 div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF); 1064 return (kg->kg_runtime / div); 1065 } 1066 1067 /* 1068 * This can happen if slptime and runtime are 0. 1069 */ 1070 return (0); 1071 1072} 1073 1074/* 1075 * This is only somewhat accurate since given many processes of the same 1076 * priority they will switch when their slices run out, which will be 1077 * at most SCHED_SLICE_MAX. 1078 */ 1079int 1080sched_rr_interval(void) 1081{ 1082 return (SCHED_SLICE_MAX); 1083} 1084 1085static void 1086sched_pctcpu_update(struct kse *ke) 1087{ 1088 /* 1089 * Adjust counters and watermark for pctcpu calc. 1090 */ 1091 if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) { 1092 /* 1093 * Shift the tick count out so that the divide doesn't 1094 * round away our results. 1095 */ 1096 ke->ke_ticks <<= 10; 1097 ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) * 1098 SCHED_CPU_TICKS; 1099 ke->ke_ticks >>= 10; 1100 } else 1101 ke->ke_ticks = 0; 1102 ke->ke_ltick = ticks; 1103 ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS; 1104} 1105 1106void 1107sched_prio(struct thread *td, u_char prio) 1108{ 1109 struct kse *ke; 1110 1111 ke = td->td_kse; 1112 mtx_assert(&sched_lock, MA_OWNED); 1113 if (TD_ON_RUNQ(td)) { 1114 /* 1115 * If the priority has been elevated due to priority 1116 * propagation, we may have to move ourselves to a new 1117 * queue. We still call adjustrunqueue below in case kse 1118 * needs to fix things up. 1119 */ 1120 if (prio < td->td_priority && ke && 1121 (ke->ke_flags & KEF_ASSIGNED) == 0 && 1122 ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) { 1123 runq_remove(ke->ke_runq, ke); 1124 ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr; 1125 runq_add(ke->ke_runq, ke); 1126 } 1127 adjustrunqueue(td, prio); 1128 } else 1129 td->td_priority = prio; 1130} 1131 1132void 1133sched_switch(struct thread *td) 1134{ 1135 struct thread *newtd; 1136 struct kse *ke; 1137 1138 mtx_assert(&sched_lock, MA_OWNED); 1139 1140 ke = td->td_kse; 1141 1142 td->td_last_kse = ke; 1143 td->td_lastcpu = td->td_oncpu; 1144 td->td_oncpu = NOCPU; 1145 td->td_flags &= ~TDF_NEEDRESCHED; 1146 1147 /* 1148 * If the KSE has been assigned it may be in the process of switching 1149 * to the new cpu. This is the case in sched_bind(). 1150 */ 1151 if ((ke->ke_flags & KEF_ASSIGNED) == 0) { 1152 if (TD_IS_RUNNING(td)) { 1153 if (td->td_proc->p_flag & P_SA) { 1154 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1155 setrunqueue(td); 1156 } else 1157 kseq_runq_add(KSEQ_SELF(), ke); 1158 } else { 1159 if (ke->ke_runq) 1160 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1161 /* 1162 * We will not be on the run queue. So we must be 1163 * sleeping or similar. 1164 */ 1165 if (td->td_proc->p_flag & P_SA) 1166 kse_reassign(ke); 1167 } 1168 } 1169 newtd = choosethread(); 1170 if (td != newtd) 1171 cpu_switch(td, newtd); 1172 sched_lock.mtx_lock = (uintptr_t)td; 1173 1174 td->td_oncpu = PCPU_GET(cpuid); 1175} 1176 1177void 1178sched_nice(struct ksegrp *kg, int nice) 1179{ 1180 struct kse *ke; 1181 struct thread *td; 1182 struct kseq *kseq; 1183 1184 PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED); 1185 mtx_assert(&sched_lock, MA_OWNED); 1186 /* 1187 * We need to adjust the nice counts for running KSEs. 1188 */ 1189 if (kg->kg_pri_class == PRI_TIMESHARE) 1190 FOREACH_KSE_IN_GROUP(kg, ke) { 1191 if (ke->ke_runq == NULL) 1192 continue; 1193 kseq = KSEQ_CPU(ke->ke_cpu); 1194 kseq_nice_rem(kseq, kg->kg_nice); 1195 kseq_nice_add(kseq, nice); 1196 } 1197 kg->kg_nice = nice; 1198 sched_priority(kg); 1199 FOREACH_THREAD_IN_GROUP(kg, td) 1200 td->td_flags |= TDF_NEEDRESCHED; 1201} 1202 1203void 1204sched_sleep(struct thread *td, u_char prio) 1205{ 1206 mtx_assert(&sched_lock, MA_OWNED); 1207 1208 td->td_slptime = ticks; 1209 td->td_priority = prio; 1210 1211 CTR2(KTR_ULE, "sleep kse %p (tick: %d)", 1212 td->td_kse, td->td_slptime); 1213} 1214 1215void 1216sched_wakeup(struct thread *td) 1217{ 1218 mtx_assert(&sched_lock, MA_OWNED); 1219 1220 /* 1221 * Let the kseg know how long we slept for. This is because process 1222 * interactivity behavior is modeled in the kseg. 1223 */ 1224 if (td->td_slptime) { 1225 struct ksegrp *kg; 1226 int hzticks; 1227 1228 kg = td->td_ksegrp; 1229 hzticks = (ticks - td->td_slptime) << 10; 1230 if (hzticks >= SCHED_SLP_RUN_MAX) { 1231 kg->kg_slptime = SCHED_SLP_RUN_MAX; 1232 kg->kg_runtime = 1; 1233 } else { 1234 kg->kg_slptime += hzticks; 1235 sched_interact_update(kg); 1236 } 1237 sched_priority(kg); 1238 if (td->td_kse) 1239 sched_slice(td->td_kse); 1240 CTR2(KTR_ULE, "wakeup kse %p (%d ticks)", 1241 td->td_kse, hzticks); 1242 td->td_slptime = 0; 1243 } 1244 setrunqueue(td); 1245} 1246 1247/* 1248 * Penalize the parent for creating a new child and initialize the child's 1249 * priority. 1250 */ 1251void 1252sched_fork(struct proc *p, struct proc *p1) 1253{ 1254 1255 mtx_assert(&sched_lock, MA_OWNED); 1256 1257 sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1)); 1258 sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1)); 1259 sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1)); 1260} 1261 1262void 1263sched_fork_kse(struct kse *ke, struct kse *child) 1264{ 1265 1266 child->ke_slice = 1; /* Attempt to quickly learn interactivity. */ 1267 child->ke_cpu = ke->ke_cpu; 1268 child->ke_runq = NULL; 1269 1270 /* Grab our parents cpu estimation information. */ 1271 child->ke_ticks = ke->ke_ticks; 1272 child->ke_ltick = ke->ke_ltick; 1273 child->ke_ftick = ke->ke_ftick; 1274} 1275 1276void 1277sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1278{ 1279 PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED); 1280 1281 child->kg_slptime = kg->kg_slptime; 1282 child->kg_runtime = kg->kg_runtime; 1283 child->kg_user_pri = kg->kg_user_pri; 1284 child->kg_nice = kg->kg_nice; 1285 sched_interact_fork(child); 1286 kg->kg_runtime += tickincr << 10; 1287 sched_interact_update(kg); 1288 1289 CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)", 1290 kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime, 1291 child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime); 1292} 1293 1294void 1295sched_fork_thread(struct thread *td, struct thread *child) 1296{ 1297} 1298 1299void 1300sched_class(struct ksegrp *kg, int class) 1301{ 1302 struct kseq *kseq; 1303 struct kse *ke; 1304 int nclass; 1305 int oclass; 1306 1307 mtx_assert(&sched_lock, MA_OWNED); 1308 if (kg->kg_pri_class == class) 1309 return; 1310 1311 nclass = PRI_BASE(class); 1312 oclass = PRI_BASE(kg->kg_pri_class); 1313 FOREACH_KSE_IN_GROUP(kg, ke) { 1314 if (ke->ke_state != KES_ONRUNQ && 1315 ke->ke_state != KES_THREAD) 1316 continue; 1317 kseq = KSEQ_CPU(ke->ke_cpu); 1318 1319#ifdef SMP 1320 /* 1321 * On SMP if we're on the RUNQ we must adjust the transferable 1322 * count because could be changing to or from an interrupt 1323 * class. 1324 */ 1325 if (ke->ke_state == KES_ONRUNQ) { 1326 if (KSE_CAN_MIGRATE(ke, oclass)) { 1327 kseq->ksq_transferable--; 1328 kseq->ksq_group->ksg_transferable--; 1329 } 1330 if (KSE_CAN_MIGRATE(ke, nclass)) { 1331 kseq->ksq_transferable++; 1332 kseq->ksq_group->ksg_transferable++; 1333 } 1334 } 1335#endif 1336 if (oclass == PRI_TIMESHARE) { 1337 kseq->ksq_load_timeshare--; 1338 kseq_nice_rem(kseq, kg->kg_nice); 1339 } 1340 if (nclass == PRI_TIMESHARE) { 1341 kseq->ksq_load_timeshare++; 1342 kseq_nice_add(kseq, kg->kg_nice); 1343 } 1344 } 1345 1346 kg->kg_pri_class = class; 1347} 1348 1349/* 1350 * Return some of the child's priority and interactivity to the parent. 1351 */ 1352void 1353sched_exit(struct proc *p, struct proc *child) 1354{ 1355 mtx_assert(&sched_lock, MA_OWNED); 1356 sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child)); 1357 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child)); 1358} 1359 1360void 1361sched_exit_kse(struct kse *ke, struct kse *child) 1362{ 1363 kseq_load_rem(KSEQ_CPU(child->ke_cpu), child); 1364} 1365 1366void 1367sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1368{ 1369 /* kg->kg_slptime += child->kg_slptime; */ 1370 kg->kg_runtime += child->kg_runtime; 1371 sched_interact_update(kg); 1372} 1373 1374void 1375sched_exit_thread(struct thread *td, struct thread *child) 1376{ 1377} 1378 1379void 1380sched_clock(struct thread *td) 1381{ 1382 struct kseq *kseq; 1383 struct ksegrp *kg; 1384 struct kse *ke; 1385 1386 /* 1387 * sched_setup() apparently happens prior to stathz being set. We 1388 * need to resolve the timers earlier in the boot so we can avoid 1389 * calculating this here. 1390 */ 1391 if (realstathz == 0) { 1392 realstathz = stathz ? stathz : hz; 1393 tickincr = hz / realstathz; 1394 /* 1395 * XXX This does not work for values of stathz that are much 1396 * larger than hz. 1397 */ 1398 if (tickincr == 0) 1399 tickincr = 1; 1400 } 1401 1402 ke = td->td_kse; 1403 kg = ke->ke_ksegrp; 1404 1405 mtx_assert(&sched_lock, MA_OWNED); 1406 KASSERT((td != NULL), ("schedclock: null thread pointer")); 1407 1408 /* Adjust ticks for pctcpu */ 1409 ke->ke_ticks++; 1410 ke->ke_ltick = ticks; 1411 1412 /* Go up to one second beyond our max and then trim back down */ 1413 if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick) 1414 sched_pctcpu_update(ke); 1415 1416 if (td->td_flags & TDF_IDLETD) 1417 return; 1418 1419 CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)", 1420 ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10); 1421 /* 1422 * We only do slicing code for TIMESHARE ksegrps. 1423 */ 1424 if (kg->kg_pri_class != PRI_TIMESHARE) 1425 return; 1426 /* 1427 * We used a tick charge it to the ksegrp so that we can compute our 1428 * interactivity. 1429 */ 1430 kg->kg_runtime += tickincr << 10; 1431 sched_interact_update(kg); 1432 1433 /* 1434 * We used up one time slice. 1435 */ 1436 if (--ke->ke_slice > 0) 1437 return; 1438 /* 1439 * We're out of time, recompute priorities and requeue. 1440 */ 1441 kseq = KSEQ_SELF(); 1442 kseq_load_rem(kseq, ke); 1443 sched_priority(kg); 1444 sched_slice(ke); 1445 if (SCHED_CURR(kg, ke)) 1446 ke->ke_runq = kseq->ksq_curr; 1447 else 1448 ke->ke_runq = kseq->ksq_next; 1449 kseq_load_add(kseq, ke); 1450 td->td_flags |= TDF_NEEDRESCHED; 1451} 1452 1453int 1454sched_runnable(void) 1455{ 1456 struct kseq *kseq; 1457 int load; 1458 1459 load = 1; 1460 1461 kseq = KSEQ_SELF(); 1462#ifdef SMP 1463 if (kseq->ksq_assigned) { 1464 mtx_lock_spin(&sched_lock); 1465 kseq_assign(kseq); 1466 mtx_unlock_spin(&sched_lock); 1467 } 1468#endif 1469 if ((curthread->td_flags & TDF_IDLETD) != 0) { 1470 if (kseq->ksq_load > 0) 1471 goto out; 1472 } else 1473 if (kseq->ksq_load - 1 > 0) 1474 goto out; 1475 load = 0; 1476out: 1477 return (load); 1478} 1479 1480void 1481sched_userret(struct thread *td) 1482{ 1483 struct ksegrp *kg; 1484 1485 kg = td->td_ksegrp; 1486 1487 if (td->td_priority != kg->kg_user_pri) { 1488 mtx_lock_spin(&sched_lock); 1489 td->td_priority = kg->kg_user_pri; 1490 mtx_unlock_spin(&sched_lock); 1491 } 1492} 1493 1494struct kse * 1495sched_choose(void) 1496{ 1497 struct kseq *kseq; 1498 struct kse *ke; 1499 1500 mtx_assert(&sched_lock, MA_OWNED); 1501 kseq = KSEQ_SELF(); 1502#ifdef SMP 1503restart: 1504 if (kseq->ksq_assigned) 1505 kseq_assign(kseq); 1506#endif 1507 ke = kseq_choose(kseq); 1508 if (ke) { 1509#ifdef SMP 1510 if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) 1511 if (kseq_idled(kseq) == 0) 1512 goto restart; 1513#endif 1514 kseq_runq_rem(kseq, ke); 1515 ke->ke_state = KES_THREAD; 1516 1517 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) { 1518 CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)", 1519 ke, ke->ke_runq, ke->ke_slice, 1520 ke->ke_thread->td_priority); 1521 } 1522 return (ke); 1523 } 1524#ifdef SMP 1525 if (kseq_idled(kseq) == 0) 1526 goto restart; 1527#endif 1528 return (NULL); 1529} 1530 1531void 1532sched_add(struct thread *td) 1533{ 1534 struct kseq *kseq; 1535 struct ksegrp *kg; 1536 struct kse *ke; 1537 int class; 1538 1539 mtx_assert(&sched_lock, MA_OWNED); 1540 ke = td->td_kse; 1541 kg = td->td_ksegrp; 1542 if (ke->ke_flags & KEF_ASSIGNED) 1543 return; 1544 kseq = KSEQ_SELF(); 1545 KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE")); 1546 KASSERT((ke->ke_thread->td_kse != NULL), 1547 ("sched_add: No KSE on thread")); 1548 KASSERT(ke->ke_state != KES_ONRUNQ, 1549 ("sched_add: kse %p (%s) already in run queue", ke, 1550 ke->ke_proc->p_comm)); 1551 KASSERT(ke->ke_proc->p_sflag & PS_INMEM, 1552 ("sched_add: process swapped out")); 1553 KASSERT(ke->ke_runq == NULL, 1554 ("sched_add: KSE %p is still assigned to a run queue", ke)); 1555 1556 class = PRI_BASE(kg->kg_pri_class); 1557 switch (class) { 1558 case PRI_ITHD: 1559 case PRI_REALTIME: 1560 ke->ke_runq = kseq->ksq_curr; 1561 ke->ke_slice = SCHED_SLICE_MAX; 1562 ke->ke_cpu = PCPU_GET(cpuid); 1563 break; 1564 case PRI_TIMESHARE: 1565 if (SCHED_CURR(kg, ke)) 1566 ke->ke_runq = kseq->ksq_curr; 1567 else 1568 ke->ke_runq = kseq->ksq_next; 1569 break; 1570 case PRI_IDLE: 1571 /* 1572 * This is for priority prop. 1573 */ 1574 if (ke->ke_thread->td_priority < PRI_MIN_IDLE) 1575 ke->ke_runq = kseq->ksq_curr; 1576 else 1577 ke->ke_runq = &kseq->ksq_idle; 1578 ke->ke_slice = SCHED_SLICE_MIN; 1579 break; 1580 default: 1581 panic("Unknown pri class."); 1582 break; 1583 } 1584#ifdef SMP 1585 if (ke->ke_cpu != PCPU_GET(cpuid)) { 1586 ke->ke_runq = NULL; 1587 kseq_notify(ke, ke->ke_cpu); 1588 return; 1589 } 1590 /* 1591 * If there are any idle groups, give them our extra load. The 1592 * threshold at which we start to reassign kses has a large impact 1593 * on the overall performance of the system. Tuned too high and 1594 * some CPUs may idle. Too low and there will be excess migration 1595 * and context swiches. 1596 */ 1597 if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class)) 1598 if (kseq_transfer(kseq, ke, class)) 1599 return; 1600 if ((class == PRI_TIMESHARE || class == PRI_REALTIME) && 1601 (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) { 1602 /* 1603 * Check to see if our group is unidling, and if so, remove it 1604 * from the global idle mask. 1605 */ 1606 if (kseq->ksq_group->ksg_idlemask == 1607 kseq->ksq_group->ksg_cpumask) 1608 atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask); 1609 /* 1610 * Now remove ourselves from the group specific idle mask. 1611 */ 1612 kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask); 1613 } 1614#endif 1615 if (td->td_priority < curthread->td_priority) 1616 curthread->td_flags |= TDF_NEEDRESCHED; 1617 1618 ke->ke_ksegrp->kg_runq_kses++; 1619 ke->ke_state = KES_ONRUNQ; 1620 1621 kseq_runq_add(kseq, ke); 1622 kseq_load_add(kseq, ke); 1623} 1624 1625void 1626sched_rem(struct thread *td) 1627{ 1628 struct kseq *kseq; 1629 struct kse *ke; 1630 1631 ke = td->td_kse; 1632 /* 1633 * It is safe to just return here because sched_rem() is only ever 1634 * used in places where we're immediately going to add the 1635 * kse back on again. In that case it'll be added with the correct 1636 * thread and priority when the caller drops the sched_lock. 1637 */ 1638 if (ke->ke_flags & KEF_ASSIGNED) 1639 return; 1640 mtx_assert(&sched_lock, MA_OWNED); 1641 KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); 1642 1643 ke->ke_state = KES_THREAD; 1644 ke->ke_ksegrp->kg_runq_kses--; 1645 kseq = KSEQ_CPU(ke->ke_cpu); 1646 kseq_runq_rem(kseq, ke); 1647 kseq_load_rem(kseq, ke); 1648} 1649 1650fixpt_t 1651sched_pctcpu(struct thread *td) 1652{ 1653 fixpt_t pctcpu; 1654 struct kse *ke; 1655 1656 pctcpu = 0; 1657 ke = td->td_kse; 1658 if (ke == NULL) 1659 return (0); 1660 1661 mtx_lock_spin(&sched_lock); 1662 if (ke->ke_ticks) { 1663 int rtick; 1664 1665 /* 1666 * Don't update more frequently than twice a second. Allowing 1667 * this causes the cpu usage to decay away too quickly due to 1668 * rounding errors. 1669 */ 1670 if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick || 1671 ke->ke_ltick < (ticks - (hz / 2))) 1672 sched_pctcpu_update(ke); 1673 /* How many rtick per second ? */ 1674 rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS); 1675 pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT; 1676 } 1677 1678 ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick; 1679 mtx_unlock_spin(&sched_lock); 1680 1681 return (pctcpu); 1682} 1683 1684void 1685sched_bind(struct thread *td, int cpu) 1686{ 1687 struct kse *ke; 1688 1689 mtx_assert(&sched_lock, MA_OWNED); 1690 ke = td->td_kse; 1691 ke->ke_flags |= KEF_BOUND; 1692#ifdef SMP 1693 if (PCPU_GET(cpuid) == cpu) 1694 return; 1695 /* sched_rem without the runq_remove */ 1696 ke->ke_state = KES_THREAD; 1697 ke->ke_ksegrp->kg_runq_kses--; 1698 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1699 kseq_notify(ke, cpu); 1700 /* When we return from mi_switch we'll be on the correct cpu. */ 1701 td->td_proc->p_stats->p_ru.ru_nvcsw++; 1702 mi_switch(); 1703#endif 1704} 1705 1706void 1707sched_unbind(struct thread *td) 1708{ 1709 mtx_assert(&sched_lock, MA_OWNED); 1710 td->td_kse->ke_flags &= ~KEF_BOUND; 1711} 1712 1713int 1714sched_sizeof_kse(void) 1715{ 1716 return (sizeof(struct kse) + sizeof(struct ke_sched)); 1717} 1718 1719int 1720sched_sizeof_ksegrp(void) 1721{ 1722 return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); 1723} 1724 1725int 1726sched_sizeof_proc(void) 1727{ 1728 return (sizeof(struct proc)); 1729} 1730 1731int 1732sched_sizeof_thread(void) 1733{ 1734 return (sizeof(struct thread) + sizeof(struct td_sched)); 1735} 1736