sched_ule.c revision 123685
1/*- 2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 123685 2003-12-20 14:03:14Z jeff $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/kernel.h> 33#include <sys/ktr.h> 34#include <sys/lock.h> 35#include <sys/mutex.h> 36#include <sys/proc.h> 37#include <sys/resource.h> 38#include <sys/resourcevar.h> 39#include <sys/sched.h> 40#include <sys/smp.h> 41#include <sys/sx.h> 42#include <sys/sysctl.h> 43#include <sys/sysproto.h> 44#include <sys/vmmeter.h> 45#ifdef DDB 46#include <ddb/ddb.h> 47#endif 48#ifdef KTRACE 49#include <sys/uio.h> 50#include <sys/ktrace.h> 51#endif 52 53#include <machine/cpu.h> 54#include <machine/smp.h> 55 56#define KTR_ULE KTR_NFS 57 58/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ 59/* XXX This is bogus compatability crap for ps */ 60static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 61SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 62 63static void sched_setup(void *dummy); 64SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) 65 66static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED"); 67 68static int sched_strict; 69SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, ""); 70 71static int slice_min = 1; 72SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, ""); 73 74static int slice_max = 10; 75SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, ""); 76 77int realstathz; 78int tickincr = 1; 79 80#ifdef SMP 81/* Callouts to handle load balancing SMP systems. */ 82static struct callout kseq_lb_callout; 83static struct callout kseq_group_callout; 84#endif 85 86/* 87 * These datastructures are allocated within their parent datastructure but 88 * are scheduler specific. 89 */ 90 91struct ke_sched { 92 int ske_slice; 93 struct runq *ske_runq; 94 /* The following variables are only used for pctcpu calculation */ 95 int ske_ltick; /* Last tick that we were running on */ 96 int ske_ftick; /* First tick that we were running on */ 97 int ske_ticks; /* Tick count */ 98 /* CPU that we have affinity for. */ 99 u_char ske_cpu; 100}; 101#define ke_slice ke_sched->ske_slice 102#define ke_runq ke_sched->ske_runq 103#define ke_ltick ke_sched->ske_ltick 104#define ke_ftick ke_sched->ske_ftick 105#define ke_ticks ke_sched->ske_ticks 106#define ke_cpu ke_sched->ske_cpu 107#define ke_assign ke_procq.tqe_next 108 109#define KEF_ASSIGNED KEF_SCHED0 /* KSE is being migrated. */ 110#define KEF_BOUND KEF_SCHED1 /* KSE can not migrate. */ 111 112struct kg_sched { 113 int skg_slptime; /* Number of ticks we vol. slept */ 114 int skg_runtime; /* Number of ticks we were running */ 115}; 116#define kg_slptime kg_sched->skg_slptime 117#define kg_runtime kg_sched->skg_runtime 118 119struct td_sched { 120 int std_slptime; 121}; 122#define td_slptime td_sched->std_slptime 123 124struct td_sched td_sched; 125struct ke_sched ke_sched; 126struct kg_sched kg_sched; 127 128struct ke_sched *kse0_sched = &ke_sched; 129struct kg_sched *ksegrp0_sched = &kg_sched; 130struct p_sched *proc0_sched = NULL; 131struct td_sched *thread0_sched = &td_sched; 132 133/* 134 * The priority is primarily determined by the interactivity score. Thus, we 135 * give lower(better) priorities to kse groups that use less CPU. The nice 136 * value is then directly added to this to allow nice to have some effect 137 * on latency. 138 * 139 * PRI_RANGE: Total priority range for timeshare threads. 140 * PRI_NRESV: Number of nice values. 141 * PRI_BASE: The start of the dynamic range. 142 */ 143#define SCHED_PRI_RANGE (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1) 144#define SCHED_PRI_NRESV ((PRIO_MAX - PRIO_MIN) + 1) 145#define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) 146#define SCHED_PRI_BASE (PRI_MIN_TIMESHARE) 147#define SCHED_PRI_INTERACT(score) \ 148 ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX) 149 150/* 151 * These determine the interactivity of a process. 152 * 153 * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate 154 * before throttling back. 155 * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time. 156 * INTERACT_MAX: Maximum interactivity value. Smaller is better. 157 * INTERACT_THRESH: Threshhold for placement on the current runq. 158 */ 159#define SCHED_SLP_RUN_MAX ((hz * 5) << 10) 160#define SCHED_SLP_RUN_FORK ((hz / 2) << 10) 161#define SCHED_INTERACT_MAX (100) 162#define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2) 163#define SCHED_INTERACT_THRESH (30) 164 165/* 166 * These parameters and macros determine the size of the time slice that is 167 * granted to each thread. 168 * 169 * SLICE_MIN: Minimum time slice granted, in units of ticks. 170 * SLICE_MAX: Maximum time slice granted. 171 * SLICE_RANGE: Range of available time slices scaled by hz. 172 * SLICE_SCALE: The number slices granted per val in the range of [0, max]. 173 * SLICE_NICE: Determine the amount of slice granted to a scaled nice. 174 * SLICE_NTHRESH: The nice cutoff point for slice assignment. 175 */ 176#define SCHED_SLICE_MIN (slice_min) 177#define SCHED_SLICE_MAX (slice_max) 178#define SCHED_SLICE_INTERACTIVE (slice_min * 4) 179#define SCHED_SLICE_NTHRESH (SCHED_PRI_NHALF - 1) 180#define SCHED_SLICE_RANGE (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1) 181#define SCHED_SLICE_SCALE(val, max) (((val) * SCHED_SLICE_RANGE) / (max)) 182#define SCHED_SLICE_NICE(nice) \ 183 (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH)) 184 185/* 186 * This macro determines whether or not the kse belongs on the current or 187 * next run queue. 188 */ 189#define SCHED_INTERACTIVE(kg) \ 190 (sched_interact_score(kg) < SCHED_INTERACT_THRESH) 191#define SCHED_CURR(kg, ke) \ 192 (ke->ke_thread->td_priority != kg->kg_user_pri || \ 193 SCHED_INTERACTIVE(kg)) 194 195/* 196 * Cpu percentage computation macros and defines. 197 * 198 * SCHED_CPU_TIME: Number of seconds to average the cpu usage across. 199 * SCHED_CPU_TICKS: Number of hz ticks to average the cpu usage across. 200 */ 201 202#define SCHED_CPU_TIME 10 203#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME) 204 205/* 206 * kseq - per processor runqs and statistics. 207 */ 208struct kseq { 209 struct runq ksq_idle; /* Queue of IDLE threads. */ 210 struct runq ksq_timeshare[2]; /* Run queues for !IDLE. */ 211 struct runq *ksq_next; /* Next timeshare queue. */ 212 struct runq *ksq_curr; /* Current queue. */ 213 int ksq_load_timeshare; /* Load for timeshare. */ 214 int ksq_load; /* Aggregate load. */ 215 short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */ 216 short ksq_nicemin; /* Least nice. */ 217#ifdef SMP 218 int ksq_transferable; 219 LIST_ENTRY(kseq) ksq_siblings; /* Next in kseq group. */ 220 struct kseq_group *ksq_group; /* Our processor group. */ 221 volatile struct kse *ksq_assigned; /* assigned by another CPU. */ 222#endif 223}; 224 225#ifdef SMP 226/* 227 * kseq groups are groups of processors which can cheaply share threads. When 228 * one processor in the group goes idle it will check the runqs of the other 229 * processors in its group prior to halting and waiting for an interrupt. 230 * These groups are suitable for SMT (Symetric Multi-Threading) and not NUMA. 231 * In a numa environment we'd want an idle bitmap per group and a two tiered 232 * load balancer. 233 */ 234struct kseq_group { 235 int ksg_cpus; /* Count of CPUs in this kseq group. */ 236 int ksg_cpumask; /* Mask of cpus in this group. */ 237 int ksg_idlemask; /* Idle cpus in this group. */ 238 int ksg_mask; /* Bit mask for first cpu. */ 239 int ksg_load; /* Total load of this group. */ 240 int ksg_transferable; /* Transferable load of this group. */ 241 LIST_HEAD(, kseq) ksg_members; /* Linked list of all members. */ 242}; 243#endif 244 245/* 246 * One kse queue per processor. 247 */ 248#ifdef SMP 249static int kseq_idle; 250static int ksg_maxid; 251static struct kseq kseq_cpu[MAXCPU]; 252static struct kseq_group kseq_groups[MAXCPU]; 253#define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)]) 254#define KSEQ_CPU(x) (&kseq_cpu[(x)]) 255#define KSEQ_ID(x) ((x) - kseq_cpu) 256#define KSEQ_GROUP(x) (&kseq_groups[(x)]) 257#else /* !SMP */ 258static struct kseq kseq_cpu; 259#define KSEQ_SELF() (&kseq_cpu) 260#define KSEQ_CPU(x) (&kseq_cpu) 261#endif 262 263static void sched_slice(struct kse *ke); 264static void sched_priority(struct ksegrp *kg); 265static int sched_interact_score(struct ksegrp *kg); 266static void sched_interact_update(struct ksegrp *kg); 267static void sched_interact_fork(struct ksegrp *kg); 268static void sched_pctcpu_update(struct kse *ke); 269 270/* Operations on per processor queues */ 271static struct kse * kseq_choose(struct kseq *kseq); 272static void kseq_setup(struct kseq *kseq); 273static void kseq_load_add(struct kseq *kseq, struct kse *ke); 274static void kseq_load_rem(struct kseq *kseq, struct kse *ke); 275static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke); 276static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke); 277static void kseq_nice_add(struct kseq *kseq, int nice); 278static void kseq_nice_rem(struct kseq *kseq, int nice); 279void kseq_print(int cpu); 280#ifdef SMP 281static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class); 282static struct kse *runq_steal(struct runq *rq); 283static void sched_balance(void *arg); 284static void sched_balance_group(struct kseq_group *ksg); 285static void sched_balance_pair(struct kseq *high, struct kseq *low); 286static void kseq_move(struct kseq *from, int cpu); 287static int kseq_idled(struct kseq *kseq); 288static void kseq_notify(struct kse *ke, int cpu); 289static void kseq_assign(struct kseq *); 290static struct kse *kseq_steal(struct kseq *kseq, int stealidle); 291#define KSE_CAN_MIGRATE(ke, class) \ 292 ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 && \ 293 ((ke)->ke_flags & KEF_BOUND) == 0) 294#endif 295 296void 297kseq_print(int cpu) 298{ 299 struct kseq *kseq; 300 int i; 301 302 kseq = KSEQ_CPU(cpu); 303 304 printf("kseq:\n"); 305 printf("\tload: %d\n", kseq->ksq_load); 306 printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare); 307#ifdef SMP 308 printf("\tload transferable: %d\n", kseq->ksq_transferable); 309#endif 310 printf("\tnicemin:\t%d\n", kseq->ksq_nicemin); 311 printf("\tnice counts:\n"); 312 for (i = 0; i < SCHED_PRI_NRESV; i++) 313 if (kseq->ksq_nice[i]) 314 printf("\t\t%d = %d\n", 315 i - SCHED_PRI_NHALF, kseq->ksq_nice[i]); 316} 317 318static __inline void 319kseq_runq_add(struct kseq *kseq, struct kse *ke) 320{ 321#ifdef SMP 322 if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) { 323 kseq->ksq_transferable++; 324 kseq->ksq_group->ksg_transferable++; 325 } 326#endif 327 runq_add(ke->ke_runq, ke); 328} 329 330static __inline void 331kseq_runq_rem(struct kseq *kseq, struct kse *ke) 332{ 333#ifdef SMP 334 if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) { 335 kseq->ksq_transferable--; 336 kseq->ksq_group->ksg_transferable--; 337 } 338#endif 339 runq_remove(ke->ke_runq, ke); 340} 341 342static void 343kseq_load_add(struct kseq *kseq, struct kse *ke) 344{ 345 int class; 346 mtx_assert(&sched_lock, MA_OWNED); 347 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 348 if (class == PRI_TIMESHARE) 349 kseq->ksq_load_timeshare++; 350 kseq->ksq_load++; 351#ifdef SMP 352 if (class != PRI_ITHD) 353 kseq->ksq_group->ksg_load++; 354#endif 355 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 356 CTR6(KTR_ULE, 357 "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))", 358 ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority, 359 ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin); 360 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 361 kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice); 362} 363 364static void 365kseq_load_rem(struct kseq *kseq, struct kse *ke) 366{ 367 int class; 368 mtx_assert(&sched_lock, MA_OWNED); 369 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 370 if (class == PRI_TIMESHARE) 371 kseq->ksq_load_timeshare--; 372#ifdef SMP 373 if (class != PRI_ITHD) 374 kseq->ksq_group->ksg_load--; 375#endif 376 kseq->ksq_load--; 377 ke->ke_runq = NULL; 378 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 379 kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice); 380} 381 382static void 383kseq_nice_add(struct kseq *kseq, int nice) 384{ 385 mtx_assert(&sched_lock, MA_OWNED); 386 /* Normalize to zero. */ 387 kseq->ksq_nice[nice + SCHED_PRI_NHALF]++; 388 if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1) 389 kseq->ksq_nicemin = nice; 390} 391 392static void 393kseq_nice_rem(struct kseq *kseq, int nice) 394{ 395 int n; 396 397 mtx_assert(&sched_lock, MA_OWNED); 398 /* Normalize to zero. */ 399 n = nice + SCHED_PRI_NHALF; 400 kseq->ksq_nice[n]--; 401 KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count.")); 402 403 /* 404 * If this wasn't the smallest nice value or there are more in 405 * this bucket we can just return. Otherwise we have to recalculate 406 * the smallest nice. 407 */ 408 if (nice != kseq->ksq_nicemin || 409 kseq->ksq_nice[n] != 0 || 410 kseq->ksq_load_timeshare == 0) 411 return; 412 413 for (; n < SCHED_PRI_NRESV; n++) 414 if (kseq->ksq_nice[n]) { 415 kseq->ksq_nicemin = n - SCHED_PRI_NHALF; 416 return; 417 } 418} 419 420#ifdef SMP 421/* 422 * sched_balance is a simple CPU load balancing algorithm. It operates by 423 * finding the least loaded and most loaded cpu and equalizing their load 424 * by migrating some processes. 425 * 426 * Dealing only with two CPUs at a time has two advantages. Firstly, most 427 * installations will only have 2 cpus. Secondly, load balancing too much at 428 * once can have an unpleasant effect on the system. The scheduler rarely has 429 * enough information to make perfect decisions. So this algorithm chooses 430 * algorithm simplicity and more gradual effects on load in larger systems. 431 * 432 * It could be improved by considering the priorities and slices assigned to 433 * each task prior to balancing them. There are many pathological cases with 434 * any approach and so the semi random algorithm below may work as well as any. 435 * 436 */ 437static void 438sched_balance(void *arg) 439{ 440 struct kseq_group *high; 441 struct kseq_group *low; 442 struct kseq_group *ksg; 443 int timo; 444 int cnt; 445 int i; 446 447 mtx_lock_spin(&sched_lock); 448 if (smp_started == 0) 449 goto out; 450 low = high = NULL; 451 i = random() % (ksg_maxid + 1); 452 for (cnt = 0; cnt <= ksg_maxid; cnt++) { 453 ksg = KSEQ_GROUP(i); 454 /* 455 * Find the CPU with the highest load that has some 456 * threads to transfer. 457 */ 458 if ((high == NULL || ksg->ksg_load > high->ksg_load) 459 && ksg->ksg_transferable) 460 high = ksg; 461 if (low == NULL || ksg->ksg_load < low->ksg_load) 462 low = ksg; 463 if (++i > ksg_maxid) 464 i = 0; 465 } 466 if (low != NULL && high != NULL && high != low) 467 sched_balance_pair(LIST_FIRST(&high->ksg_members), 468 LIST_FIRST(&low->ksg_members)); 469out: 470 mtx_unlock_spin(&sched_lock); 471 timo = random() % (hz * 2); 472 callout_reset(&kseq_lb_callout, timo, sched_balance, NULL); 473} 474 475static void 476sched_balance_groups(void *arg) 477{ 478 int timo; 479 int i; 480 481 mtx_lock_spin(&sched_lock); 482 if (smp_started) 483 for (i = 0; i <= ksg_maxid; i++) 484 sched_balance_group(KSEQ_GROUP(i)); 485 mtx_unlock_spin(&sched_lock); 486 timo = random() % (hz * 2); 487 callout_reset(&kseq_group_callout, timo, sched_balance_groups, NULL); 488} 489 490static void 491sched_balance_group(struct kseq_group *ksg) 492{ 493 struct kseq *kseq; 494 struct kseq *high; 495 struct kseq *low; 496 int load; 497 498 if (ksg->ksg_transferable == 0) 499 return; 500 low = NULL; 501 high = NULL; 502 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { 503 load = kseq->ksq_load; 504 if (kseq == KSEQ_CPU(0)) 505 load--; 506 if (high == NULL || load > high->ksq_load) 507 high = kseq; 508 if (low == NULL || load < low->ksq_load) 509 low = kseq; 510 } 511 if (high != NULL && low != NULL && high != low) 512 sched_balance_pair(high, low); 513} 514 515static void 516sched_balance_pair(struct kseq *high, struct kseq *low) 517{ 518 int transferable; 519 int high_load; 520 int low_load; 521 int move; 522 int diff; 523 int i; 524 525 /* 526 * If we're transfering within a group we have to use this specific 527 * kseq's transferable count, otherwise we can steal from other members 528 * of the group. 529 */ 530 if (high->ksq_group == low->ksq_group) { 531 transferable = high->ksq_transferable; 532 high_load = high->ksq_load; 533 low_load = low->ksq_load; 534 /* 535 * XXX If we encounter cpu 0 we must remember to reduce it's 536 * load by 1 to reflect the swi that is running the callout. 537 * At some point we should really fix load balancing of the 538 * swi and then this wont matter. 539 */ 540 if (high == KSEQ_CPU(0)) 541 high_load--; 542 if (low == KSEQ_CPU(0)) 543 low_load--; 544 } else { 545 transferable = high->ksq_group->ksg_transferable; 546 high_load = high->ksq_group->ksg_load; 547 low_load = low->ksq_group->ksg_load; 548 } 549 if (transferable == 0) 550 return; 551 /* 552 * Determine what the imbalance is and then adjust that to how many 553 * kses we actually have to give up (transferable). 554 */ 555 diff = high_load - low_load; 556 move = diff / 2; 557 if (diff & 0x1) 558 move++; 559 move = min(move, transferable); 560 for (i = 0; i < move; i++) 561 kseq_move(high, KSEQ_ID(low)); 562 return; 563} 564 565static void 566kseq_move(struct kseq *from, int cpu) 567{ 568 struct kseq *kseq; 569 struct kseq *to; 570 struct kse *ke; 571 572 kseq = from; 573 to = KSEQ_CPU(cpu); 574 ke = kseq_steal(kseq, 1); 575 if (ke == NULL) { 576 struct kseq_group *ksg; 577 578 ksg = kseq->ksq_group; 579 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { 580 if (kseq == from || kseq->ksq_transferable == 0) 581 continue; 582 ke = kseq_steal(kseq, 1); 583 break; 584 } 585 if (ke == NULL) 586 panic("kseq_move: No KSEs available with a " 587 "transferable count of %d\n", 588 ksg->ksg_transferable); 589 } 590 if (kseq == to) 591 return; 592 ke->ke_state = KES_THREAD; 593 kseq_runq_rem(kseq, ke); 594 kseq_load_rem(kseq, ke); 595 kseq_notify(ke, cpu); 596} 597 598static int 599kseq_idled(struct kseq *kseq) 600{ 601 struct kseq_group *ksg; 602 struct kseq *steal; 603 struct kse *ke; 604 605 ksg = kseq->ksq_group; 606 /* 607 * If we're in a cpu group, try and steal kses from another cpu in 608 * the group before idling. 609 */ 610 if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) { 611 LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) { 612 if (steal == kseq || steal->ksq_transferable == 0) 613 continue; 614 ke = kseq_steal(steal, 0); 615 if (ke == NULL) 616 continue; 617 ke->ke_state = KES_THREAD; 618 kseq_runq_rem(steal, ke); 619 kseq_load_rem(steal, ke); 620 ke->ke_cpu = PCPU_GET(cpuid); 621 sched_add(ke->ke_thread); 622 return (0); 623 } 624 } 625 /* 626 * We only set the idled bit when all of the cpus in the group are 627 * idle. Otherwise we could get into a situation where a KSE bounces 628 * back and forth between two idle cores on seperate physical CPUs. 629 */ 630 ksg->ksg_idlemask |= PCPU_GET(cpumask); 631 if (ksg->ksg_idlemask != ksg->ksg_cpumask) 632 return (1); 633 atomic_set_int(&kseq_idle, ksg->ksg_mask); 634 return (1); 635} 636 637static void 638kseq_assign(struct kseq *kseq) 639{ 640 struct kse *nke; 641 struct kse *ke; 642 643 do { 644 (volatile struct kse *)ke = kseq->ksq_assigned; 645 } while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL)); 646 for (; ke != NULL; ke = nke) { 647 nke = ke->ke_assign; 648 ke->ke_flags &= ~KEF_ASSIGNED; 649 sched_add(ke->ke_thread); 650 } 651} 652 653static void 654kseq_notify(struct kse *ke, int cpu) 655{ 656 struct kseq *kseq; 657 struct thread *td; 658 struct pcpu *pcpu; 659 660 ke->ke_cpu = cpu; 661 ke->ke_flags |= KEF_ASSIGNED; 662 663 kseq = KSEQ_CPU(cpu); 664 665 /* 666 * Place a KSE on another cpu's queue and force a resched. 667 */ 668 do { 669 (volatile struct kse *)ke->ke_assign = kseq->ksq_assigned; 670 } while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke)); 671 pcpu = pcpu_find(cpu); 672 td = pcpu->pc_curthread; 673 if (ke->ke_thread->td_priority < td->td_priority || 674 td == pcpu->pc_idlethread) { 675 td->td_flags |= TDF_NEEDRESCHED; 676 ipi_selected(1 << cpu, IPI_AST); 677 } 678} 679 680static struct kse * 681runq_steal(struct runq *rq) 682{ 683 struct rqhead *rqh; 684 struct rqbits *rqb; 685 struct kse *ke; 686 int word; 687 int bit; 688 689 mtx_assert(&sched_lock, MA_OWNED); 690 rqb = &rq->rq_status; 691 for (word = 0; word < RQB_LEN; word++) { 692 if (rqb->rqb_bits[word] == 0) 693 continue; 694 for (bit = 0; bit < RQB_BPW; bit++) { 695 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 696 continue; 697 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 698 TAILQ_FOREACH(ke, rqh, ke_procq) { 699 if (KSE_CAN_MIGRATE(ke, 700 PRI_BASE(ke->ke_ksegrp->kg_pri_class))) 701 return (ke); 702 } 703 } 704 } 705 return (NULL); 706} 707 708static struct kse * 709kseq_steal(struct kseq *kseq, int stealidle) 710{ 711 struct kse *ke; 712 713 /* 714 * Steal from next first to try to get a non-interactive task that 715 * may not have run for a while. 716 */ 717 if ((ke = runq_steal(kseq->ksq_next)) != NULL) 718 return (ke); 719 if ((ke = runq_steal(kseq->ksq_curr)) != NULL) 720 return (ke); 721 if (stealidle) 722 return (runq_steal(&kseq->ksq_idle)); 723 return (NULL); 724} 725 726int 727kseq_transfer(struct kseq *kseq, struct kse *ke, int class) 728{ 729 struct kseq_group *ksg; 730 int cpu; 731 732 if (smp_started == 0) 733 return (0); 734 cpu = 0; 735 ksg = kseq->ksq_group; 736 737 /* 738 * If there are any idle groups, give them our extra load. The 739 * threshold at which we start to reassign kses has a large impact 740 * on the overall performance of the system. Tuned too high and 741 * some CPUs may idle. Too low and there will be excess migration 742 * and context swiches. 743 */ 744 /* 745 * XXX This ksg_transferable might work better if we were checking 746 * against a global group load. As it is now, this prevents us from 747 * transfering a thread from a group that is potentially bogged down 748 * with non transferable load. 749 */ 750 if (ksg->ksg_transferable > ksg->ksg_cpus && kseq_idle) { 751 /* 752 * Multiple cpus could find this bit simultaneously 753 * but the race shouldn't be terrible. 754 */ 755 cpu = ffs(kseq_idle); 756 if (cpu) 757 atomic_clear_int(&kseq_idle, 1 << (cpu - 1)); 758 } 759 /* 760 * If another cpu in this group has idled, assign a thread over 761 * to them after checking to see if there are idled groups. 762 */ 763 if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) { 764 cpu = ffs(ksg->ksg_idlemask); 765 if (cpu) 766 ksg->ksg_idlemask &= ~(1 << (cpu - 1)); 767 } 768 /* 769 * Now that we've found an idle CPU, migrate the thread. 770 */ 771 if (cpu) { 772 cpu--; 773 ke->ke_runq = NULL; 774 kseq_notify(ke, cpu); 775 return (1); 776 } 777 return (0); 778} 779 780#endif /* SMP */ 781 782/* 783 * Pick the highest priority task we have and return it. 784 */ 785 786static struct kse * 787kseq_choose(struct kseq *kseq) 788{ 789 struct kse *ke; 790 struct runq *swap; 791 792 mtx_assert(&sched_lock, MA_OWNED); 793 swap = NULL; 794 795 for (;;) { 796 ke = runq_choose(kseq->ksq_curr); 797 if (ke == NULL) { 798 /* 799 * We already swaped once and didn't get anywhere. 800 */ 801 if (swap) 802 break; 803 swap = kseq->ksq_curr; 804 kseq->ksq_curr = kseq->ksq_next; 805 kseq->ksq_next = swap; 806 continue; 807 } 808 /* 809 * If we encounter a slice of 0 the kse is in a 810 * TIMESHARE kse group and its nice was too far out 811 * of the range that receives slices. 812 */ 813 if (ke->ke_slice == 0) { 814 runq_remove(ke->ke_runq, ke); 815 sched_slice(ke); 816 ke->ke_runq = kseq->ksq_next; 817 runq_add(ke->ke_runq, ke); 818 continue; 819 } 820 return (ke); 821 } 822 823 return (runq_choose(&kseq->ksq_idle)); 824} 825 826static void 827kseq_setup(struct kseq *kseq) 828{ 829 runq_init(&kseq->ksq_timeshare[0]); 830 runq_init(&kseq->ksq_timeshare[1]); 831 runq_init(&kseq->ksq_idle); 832 kseq->ksq_curr = &kseq->ksq_timeshare[0]; 833 kseq->ksq_next = &kseq->ksq_timeshare[1]; 834 kseq->ksq_load = 0; 835 kseq->ksq_load_timeshare = 0; 836} 837 838static void 839sched_setup(void *dummy) 840{ 841#ifdef SMP 842 int balance_groups; 843 int i; 844#endif 845 846 slice_min = (hz/100); /* 10ms */ 847 slice_max = (hz/7); /* ~140ms */ 848 849#ifdef SMP 850 balance_groups = 0; 851 /* 852 * Initialize the kseqs. 853 */ 854 for (i = 0; i < MAXCPU; i++) { 855 struct kseq *ksq; 856 857 ksq = &kseq_cpu[i]; 858 ksq->ksq_assigned = NULL; 859 kseq_setup(&kseq_cpu[i]); 860 } 861 if (smp_topology == NULL) { 862 struct kseq_group *ksg; 863 struct kseq *ksq; 864 865 for (i = 0; i < MAXCPU; i++) { 866 ksq = &kseq_cpu[i]; 867 ksg = &kseq_groups[i]; 868 /* 869 * Setup a kse group with one member. 870 */ 871 ksq->ksq_transferable = 0; 872 ksq->ksq_group = ksg; 873 ksg->ksg_cpus = 1; 874 ksg->ksg_idlemask = 0; 875 ksg->ksg_cpumask = ksg->ksg_mask = 1 << i; 876 ksg->ksg_load = 0; 877 ksg->ksg_transferable = 0; 878 LIST_INIT(&ksg->ksg_members); 879 LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings); 880 } 881 } else { 882 struct kseq_group *ksg; 883 struct cpu_group *cg; 884 int j; 885 886 for (i = 0; i < smp_topology->ct_count; i++) { 887 cg = &smp_topology->ct_group[i]; 888 ksg = &kseq_groups[i]; 889 /* 890 * Initialize the group. 891 */ 892 ksg->ksg_idlemask = 0; 893 ksg->ksg_load = 0; 894 ksg->ksg_transferable = 0; 895 ksg->ksg_cpus = cg->cg_count; 896 ksg->ksg_cpumask = cg->cg_mask; 897 LIST_INIT(&ksg->ksg_members); 898 /* 899 * Find all of the group members and add them. 900 */ 901 for (j = 0; j < MAXCPU; j++) { 902 if ((cg->cg_mask & (1 << j)) != 0) { 903 if (ksg->ksg_mask == 0) 904 ksg->ksg_mask = 1 << j; 905 kseq_cpu[j].ksq_transferable = 0; 906 kseq_cpu[j].ksq_group = ksg; 907 LIST_INSERT_HEAD(&ksg->ksg_members, 908 &kseq_cpu[j], ksq_siblings); 909 } 910 } 911 if (ksg->ksg_cpus > 1) 912 balance_groups = 1; 913 } 914 ksg_maxid = smp_topology->ct_count - 1; 915 } 916 callout_init(&kseq_lb_callout, CALLOUT_MPSAFE); 917 callout_init(&kseq_group_callout, CALLOUT_MPSAFE); 918 sched_balance(NULL); 919 /* 920 * Stagger the group and global load balancer so they do not 921 * interfere with each other. 922 */ 923 if (balance_groups) 924 callout_reset(&kseq_group_callout, hz / 2, 925 sched_balance_groups, NULL); 926#else 927 kseq_setup(KSEQ_SELF()); 928#endif 929 mtx_lock_spin(&sched_lock); 930 kseq_load_add(KSEQ_SELF(), &kse0); 931 mtx_unlock_spin(&sched_lock); 932} 933 934/* 935 * Scale the scheduling priority according to the "interactivity" of this 936 * process. 937 */ 938static void 939sched_priority(struct ksegrp *kg) 940{ 941 int pri; 942 943 if (kg->kg_pri_class != PRI_TIMESHARE) 944 return; 945 946 pri = SCHED_PRI_INTERACT(sched_interact_score(kg)); 947 pri += SCHED_PRI_BASE; 948 pri += kg->kg_nice; 949 950 if (pri > PRI_MAX_TIMESHARE) 951 pri = PRI_MAX_TIMESHARE; 952 else if (pri < PRI_MIN_TIMESHARE) 953 pri = PRI_MIN_TIMESHARE; 954 955 kg->kg_user_pri = pri; 956 957 return; 958} 959 960/* 961 * Calculate a time slice based on the properties of the kseg and the runq 962 * that we're on. This is only for PRI_TIMESHARE ksegrps. 963 */ 964static void 965sched_slice(struct kse *ke) 966{ 967 struct kseq *kseq; 968 struct ksegrp *kg; 969 970 kg = ke->ke_ksegrp; 971 kseq = KSEQ_CPU(ke->ke_cpu); 972 973 /* 974 * Rationale: 975 * KSEs in interactive ksegs get the minimum slice so that we 976 * quickly notice if it abuses its advantage. 977 * 978 * KSEs in non-interactive ksegs are assigned a slice that is 979 * based on the ksegs nice value relative to the least nice kseg 980 * on the run queue for this cpu. 981 * 982 * If the KSE is less nice than all others it gets the maximum 983 * slice and other KSEs will adjust their slice relative to 984 * this when they first expire. 985 * 986 * There is 20 point window that starts relative to the least 987 * nice kse on the run queue. Slice size is determined by 988 * the kse distance from the last nice ksegrp. 989 * 990 * If the kse is outside of the window it will get no slice 991 * and will be reevaluated each time it is selected on the 992 * run queue. The exception to this is nice 0 ksegs when 993 * a nice -20 is running. They are always granted a minimum 994 * slice. 995 */ 996 if (!SCHED_INTERACTIVE(kg)) { 997 int nice; 998 999 nice = kg->kg_nice + (0 - kseq->ksq_nicemin); 1000 if (kseq->ksq_load_timeshare == 0 || 1001 kg->kg_nice < kseq->ksq_nicemin) 1002 ke->ke_slice = SCHED_SLICE_MAX; 1003 else if (nice <= SCHED_SLICE_NTHRESH) 1004 ke->ke_slice = SCHED_SLICE_NICE(nice); 1005 else if (kg->kg_nice == 0) 1006 ke->ke_slice = SCHED_SLICE_MIN; 1007 else 1008 ke->ke_slice = 0; 1009 } else 1010 ke->ke_slice = SCHED_SLICE_INTERACTIVE; 1011 1012 CTR6(KTR_ULE, 1013 "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)", 1014 ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin, 1015 kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg)); 1016 1017 return; 1018} 1019 1020/* 1021 * This routine enforces a maximum limit on the amount of scheduling history 1022 * kept. It is called after either the slptime or runtime is adjusted. 1023 * This routine will not operate correctly when slp or run times have been 1024 * adjusted to more than double their maximum. 1025 */ 1026static void 1027sched_interact_update(struct ksegrp *kg) 1028{ 1029 int sum; 1030 1031 sum = kg->kg_runtime + kg->kg_slptime; 1032 if (sum < SCHED_SLP_RUN_MAX) 1033 return; 1034 /* 1035 * If we have exceeded by more than 1/5th then the algorithm below 1036 * will not bring us back into range. Dividing by two here forces 1037 * us into the range of [3/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1038 */ 1039 if (sum > (SCHED_INTERACT_MAX / 5) * 6) { 1040 kg->kg_runtime /= 2; 1041 kg->kg_slptime /= 2; 1042 return; 1043 } 1044 kg->kg_runtime = (kg->kg_runtime / 5) * 4; 1045 kg->kg_slptime = (kg->kg_slptime / 5) * 4; 1046} 1047 1048static void 1049sched_interact_fork(struct ksegrp *kg) 1050{ 1051 int ratio; 1052 int sum; 1053 1054 sum = kg->kg_runtime + kg->kg_slptime; 1055 if (sum > SCHED_SLP_RUN_FORK) { 1056 ratio = sum / SCHED_SLP_RUN_FORK; 1057 kg->kg_runtime /= ratio; 1058 kg->kg_slptime /= ratio; 1059 } 1060} 1061 1062static int 1063sched_interact_score(struct ksegrp *kg) 1064{ 1065 int div; 1066 1067 if (kg->kg_runtime > kg->kg_slptime) { 1068 div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF); 1069 return (SCHED_INTERACT_HALF + 1070 (SCHED_INTERACT_HALF - (kg->kg_slptime / div))); 1071 } if (kg->kg_slptime > kg->kg_runtime) { 1072 div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF); 1073 return (kg->kg_runtime / div); 1074 } 1075 1076 /* 1077 * This can happen if slptime and runtime are 0. 1078 */ 1079 return (0); 1080 1081} 1082 1083/* 1084 * This is only somewhat accurate since given many processes of the same 1085 * priority they will switch when their slices run out, which will be 1086 * at most SCHED_SLICE_MAX. 1087 */ 1088int 1089sched_rr_interval(void) 1090{ 1091 return (SCHED_SLICE_MAX); 1092} 1093 1094static void 1095sched_pctcpu_update(struct kse *ke) 1096{ 1097 /* 1098 * Adjust counters and watermark for pctcpu calc. 1099 */ 1100 if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) { 1101 /* 1102 * Shift the tick count out so that the divide doesn't 1103 * round away our results. 1104 */ 1105 ke->ke_ticks <<= 10; 1106 ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) * 1107 SCHED_CPU_TICKS; 1108 ke->ke_ticks >>= 10; 1109 } else 1110 ke->ke_ticks = 0; 1111 ke->ke_ltick = ticks; 1112 ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS; 1113} 1114 1115void 1116sched_prio(struct thread *td, u_char prio) 1117{ 1118 struct kse *ke; 1119 1120 ke = td->td_kse; 1121 mtx_assert(&sched_lock, MA_OWNED); 1122 if (TD_ON_RUNQ(td)) { 1123 /* 1124 * If the priority has been elevated due to priority 1125 * propagation, we may have to move ourselves to a new 1126 * queue. We still call adjustrunqueue below in case kse 1127 * needs to fix things up. 1128 */ 1129 if (prio < td->td_priority && ke && 1130 (ke->ke_flags & KEF_ASSIGNED) == 0 && 1131 ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) { 1132 runq_remove(ke->ke_runq, ke); 1133 ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr; 1134 runq_add(ke->ke_runq, ke); 1135 } 1136 adjustrunqueue(td, prio); 1137 } else 1138 td->td_priority = prio; 1139} 1140 1141void 1142sched_switch(struct thread *td) 1143{ 1144 struct thread *newtd; 1145 struct kse *ke; 1146 1147 mtx_assert(&sched_lock, MA_OWNED); 1148 1149 ke = td->td_kse; 1150 1151 td->td_last_kse = ke; 1152 td->td_lastcpu = td->td_oncpu; 1153 td->td_oncpu = NOCPU; 1154 td->td_flags &= ~TDF_NEEDRESCHED; 1155 1156 /* 1157 * If the KSE has been assigned it may be in the process of switching 1158 * to the new cpu. This is the case in sched_bind(). 1159 */ 1160 if ((ke->ke_flags & KEF_ASSIGNED) == 0) { 1161 if (TD_IS_RUNNING(td)) { 1162 if (td->td_proc->p_flag & P_SA) { 1163 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1164 setrunqueue(td); 1165 } else 1166 kseq_runq_add(KSEQ_SELF(), ke); 1167 } else { 1168 if (ke->ke_runq) 1169 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1170 /* 1171 * We will not be on the run queue. So we must be 1172 * sleeping or similar. 1173 */ 1174 if (td->td_proc->p_flag & P_SA) 1175 kse_reassign(ke); 1176 } 1177 } 1178 newtd = choosethread(); 1179 if (td != newtd) 1180 cpu_switch(td, newtd); 1181 sched_lock.mtx_lock = (uintptr_t)td; 1182 1183 td->td_oncpu = PCPU_GET(cpuid); 1184} 1185 1186void 1187sched_nice(struct ksegrp *kg, int nice) 1188{ 1189 struct kse *ke; 1190 struct thread *td; 1191 struct kseq *kseq; 1192 1193 PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED); 1194 mtx_assert(&sched_lock, MA_OWNED); 1195 /* 1196 * We need to adjust the nice counts for running KSEs. 1197 */ 1198 if (kg->kg_pri_class == PRI_TIMESHARE) 1199 FOREACH_KSE_IN_GROUP(kg, ke) { 1200 if (ke->ke_runq == NULL) 1201 continue; 1202 kseq = KSEQ_CPU(ke->ke_cpu); 1203 kseq_nice_rem(kseq, kg->kg_nice); 1204 kseq_nice_add(kseq, nice); 1205 } 1206 kg->kg_nice = nice; 1207 sched_priority(kg); 1208 FOREACH_THREAD_IN_GROUP(kg, td) 1209 td->td_flags |= TDF_NEEDRESCHED; 1210} 1211 1212void 1213sched_sleep(struct thread *td, u_char prio) 1214{ 1215 mtx_assert(&sched_lock, MA_OWNED); 1216 1217 td->td_slptime = ticks; 1218 td->td_priority = prio; 1219 1220 CTR2(KTR_ULE, "sleep kse %p (tick: %d)", 1221 td->td_kse, td->td_slptime); 1222} 1223 1224void 1225sched_wakeup(struct thread *td) 1226{ 1227 mtx_assert(&sched_lock, MA_OWNED); 1228 1229 /* 1230 * Let the kseg know how long we slept for. This is because process 1231 * interactivity behavior is modeled in the kseg. 1232 */ 1233 if (td->td_slptime) { 1234 struct ksegrp *kg; 1235 int hzticks; 1236 1237 kg = td->td_ksegrp; 1238 hzticks = (ticks - td->td_slptime) << 10; 1239 if (hzticks >= SCHED_SLP_RUN_MAX) { 1240 kg->kg_slptime = SCHED_SLP_RUN_MAX; 1241 kg->kg_runtime = 1; 1242 } else { 1243 kg->kg_slptime += hzticks; 1244 sched_interact_update(kg); 1245 } 1246 sched_priority(kg); 1247 if (td->td_kse) 1248 sched_slice(td->td_kse); 1249 CTR2(KTR_ULE, "wakeup kse %p (%d ticks)", 1250 td->td_kse, hzticks); 1251 td->td_slptime = 0; 1252 } 1253 setrunqueue(td); 1254} 1255 1256/* 1257 * Penalize the parent for creating a new child and initialize the child's 1258 * priority. 1259 */ 1260void 1261sched_fork(struct proc *p, struct proc *p1) 1262{ 1263 1264 mtx_assert(&sched_lock, MA_OWNED); 1265 1266 sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1)); 1267 sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1)); 1268 sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1)); 1269} 1270 1271void 1272sched_fork_kse(struct kse *ke, struct kse *child) 1273{ 1274 1275 child->ke_slice = 1; /* Attempt to quickly learn interactivity. */ 1276 child->ke_cpu = ke->ke_cpu; 1277 child->ke_runq = NULL; 1278 1279 /* Grab our parents cpu estimation information. */ 1280 child->ke_ticks = ke->ke_ticks; 1281 child->ke_ltick = ke->ke_ltick; 1282 child->ke_ftick = ke->ke_ftick; 1283} 1284 1285void 1286sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1287{ 1288 PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED); 1289 1290 child->kg_slptime = kg->kg_slptime; 1291 child->kg_runtime = kg->kg_runtime; 1292 child->kg_user_pri = kg->kg_user_pri; 1293 child->kg_nice = kg->kg_nice; 1294 sched_interact_fork(child); 1295 kg->kg_runtime += tickincr << 10; 1296 sched_interact_update(kg); 1297 1298 CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)", 1299 kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime, 1300 child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime); 1301} 1302 1303void 1304sched_fork_thread(struct thread *td, struct thread *child) 1305{ 1306} 1307 1308void 1309sched_class(struct ksegrp *kg, int class) 1310{ 1311 struct kseq *kseq; 1312 struct kse *ke; 1313 int nclass; 1314 int oclass; 1315 1316 mtx_assert(&sched_lock, MA_OWNED); 1317 if (kg->kg_pri_class == class) 1318 return; 1319 1320 nclass = PRI_BASE(class); 1321 oclass = PRI_BASE(kg->kg_pri_class); 1322 FOREACH_KSE_IN_GROUP(kg, ke) { 1323 if (ke->ke_state != KES_ONRUNQ && 1324 ke->ke_state != KES_THREAD) 1325 continue; 1326 kseq = KSEQ_CPU(ke->ke_cpu); 1327 1328#ifdef SMP 1329 /* 1330 * On SMP if we're on the RUNQ we must adjust the transferable 1331 * count because could be changing to or from an interrupt 1332 * class. 1333 */ 1334 if (ke->ke_state == KES_ONRUNQ) { 1335 if (KSE_CAN_MIGRATE(ke, oclass)) { 1336 kseq->ksq_transferable--; 1337 kseq->ksq_group->ksg_transferable--; 1338 } 1339 if (KSE_CAN_MIGRATE(ke, nclass)) { 1340 kseq->ksq_transferable++; 1341 kseq->ksq_group->ksg_transferable++; 1342 } 1343 } 1344#endif 1345 if (oclass == PRI_TIMESHARE) { 1346 kseq->ksq_load_timeshare--; 1347 kseq_nice_rem(kseq, kg->kg_nice); 1348 } 1349 if (nclass == PRI_TIMESHARE) { 1350 kseq->ksq_load_timeshare++; 1351 kseq_nice_add(kseq, kg->kg_nice); 1352 } 1353 } 1354 1355 kg->kg_pri_class = class; 1356} 1357 1358/* 1359 * Return some of the child's priority and interactivity to the parent. 1360 */ 1361void 1362sched_exit(struct proc *p, struct proc *child) 1363{ 1364 mtx_assert(&sched_lock, MA_OWNED); 1365 sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child)); 1366 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child)); 1367} 1368 1369void 1370sched_exit_kse(struct kse *ke, struct kse *child) 1371{ 1372 kseq_load_rem(KSEQ_CPU(child->ke_cpu), child); 1373} 1374 1375void 1376sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1377{ 1378 /* kg->kg_slptime += child->kg_slptime; */ 1379 kg->kg_runtime += child->kg_runtime; 1380 sched_interact_update(kg); 1381} 1382 1383void 1384sched_exit_thread(struct thread *td, struct thread *child) 1385{ 1386} 1387 1388void 1389sched_clock(struct thread *td) 1390{ 1391 struct kseq *kseq; 1392 struct ksegrp *kg; 1393 struct kse *ke; 1394 1395 /* 1396 * sched_setup() apparently happens prior to stathz being set. We 1397 * need to resolve the timers earlier in the boot so we can avoid 1398 * calculating this here. 1399 */ 1400 if (realstathz == 0) { 1401 realstathz = stathz ? stathz : hz; 1402 tickincr = hz / realstathz; 1403 /* 1404 * XXX This does not work for values of stathz that are much 1405 * larger than hz. 1406 */ 1407 if (tickincr == 0) 1408 tickincr = 1; 1409 } 1410 1411 ke = td->td_kse; 1412 kg = ke->ke_ksegrp; 1413 1414 mtx_assert(&sched_lock, MA_OWNED); 1415 KASSERT((td != NULL), ("schedclock: null thread pointer")); 1416 1417 /* Adjust ticks for pctcpu */ 1418 ke->ke_ticks++; 1419 ke->ke_ltick = ticks; 1420 1421 /* Go up to one second beyond our max and then trim back down */ 1422 if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick) 1423 sched_pctcpu_update(ke); 1424 1425 if (td->td_flags & TDF_IDLETD) 1426 return; 1427 1428 CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)", 1429 ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10); 1430 /* 1431 * We only do slicing code for TIMESHARE ksegrps. 1432 */ 1433 if (kg->kg_pri_class != PRI_TIMESHARE) 1434 return; 1435 /* 1436 * We used a tick charge it to the ksegrp so that we can compute our 1437 * interactivity. 1438 */ 1439 kg->kg_runtime += tickincr << 10; 1440 sched_interact_update(kg); 1441 1442 /* 1443 * We used up one time slice. 1444 */ 1445 if (--ke->ke_slice > 0) 1446 return; 1447 /* 1448 * We're out of time, recompute priorities and requeue. 1449 */ 1450 kseq = KSEQ_SELF(); 1451 kseq_load_rem(kseq, ke); 1452 sched_priority(kg); 1453 sched_slice(ke); 1454 if (SCHED_CURR(kg, ke)) 1455 ke->ke_runq = kseq->ksq_curr; 1456 else 1457 ke->ke_runq = kseq->ksq_next; 1458 kseq_load_add(kseq, ke); 1459 td->td_flags |= TDF_NEEDRESCHED; 1460} 1461 1462int 1463sched_runnable(void) 1464{ 1465 struct kseq *kseq; 1466 int load; 1467 1468 load = 1; 1469 1470 kseq = KSEQ_SELF(); 1471#ifdef SMP 1472 if (kseq->ksq_assigned) { 1473 mtx_lock_spin(&sched_lock); 1474 kseq_assign(kseq); 1475 mtx_unlock_spin(&sched_lock); 1476 } 1477#endif 1478 if ((curthread->td_flags & TDF_IDLETD) != 0) { 1479 if (kseq->ksq_load > 0) 1480 goto out; 1481 } else 1482 if (kseq->ksq_load - 1 > 0) 1483 goto out; 1484 load = 0; 1485out: 1486 return (load); 1487} 1488 1489void 1490sched_userret(struct thread *td) 1491{ 1492 struct ksegrp *kg; 1493 1494 kg = td->td_ksegrp; 1495 1496 if (td->td_priority != kg->kg_user_pri) { 1497 mtx_lock_spin(&sched_lock); 1498 td->td_priority = kg->kg_user_pri; 1499 mtx_unlock_spin(&sched_lock); 1500 } 1501} 1502 1503struct kse * 1504sched_choose(void) 1505{ 1506 struct kseq *kseq; 1507 struct kse *ke; 1508 1509 mtx_assert(&sched_lock, MA_OWNED); 1510 kseq = KSEQ_SELF(); 1511#ifdef SMP 1512restart: 1513 if (kseq->ksq_assigned) 1514 kseq_assign(kseq); 1515#endif 1516 ke = kseq_choose(kseq); 1517 if (ke) { 1518#ifdef SMP 1519 if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) 1520 if (kseq_idled(kseq) == 0) 1521 goto restart; 1522#endif 1523 kseq_runq_rem(kseq, ke); 1524 ke->ke_state = KES_THREAD; 1525 1526 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) { 1527 CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)", 1528 ke, ke->ke_runq, ke->ke_slice, 1529 ke->ke_thread->td_priority); 1530 } 1531 return (ke); 1532 } 1533#ifdef SMP 1534 if (kseq_idled(kseq) == 0) 1535 goto restart; 1536#endif 1537 return (NULL); 1538} 1539 1540void 1541sched_add(struct thread *td) 1542{ 1543 struct kseq *kseq; 1544 struct ksegrp *kg; 1545 struct kse *ke; 1546 int class; 1547 1548 mtx_assert(&sched_lock, MA_OWNED); 1549 ke = td->td_kse; 1550 kg = td->td_ksegrp; 1551 if (ke->ke_flags & KEF_ASSIGNED) 1552 return; 1553 kseq = KSEQ_SELF(); 1554 KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE")); 1555 KASSERT((ke->ke_thread->td_kse != NULL), 1556 ("sched_add: No KSE on thread")); 1557 KASSERT(ke->ke_state != KES_ONRUNQ, 1558 ("sched_add: kse %p (%s) already in run queue", ke, 1559 ke->ke_proc->p_comm)); 1560 KASSERT(ke->ke_proc->p_sflag & PS_INMEM, 1561 ("sched_add: process swapped out")); 1562 KASSERT(ke->ke_runq == NULL, 1563 ("sched_add: KSE %p is still assigned to a run queue", ke)); 1564 1565 class = PRI_BASE(kg->kg_pri_class); 1566 switch (class) { 1567 case PRI_ITHD: 1568 case PRI_REALTIME: 1569 ke->ke_runq = kseq->ksq_curr; 1570 ke->ke_slice = SCHED_SLICE_MAX; 1571 ke->ke_cpu = PCPU_GET(cpuid); 1572 break; 1573 case PRI_TIMESHARE: 1574 if (SCHED_CURR(kg, ke)) 1575 ke->ke_runq = kseq->ksq_curr; 1576 else 1577 ke->ke_runq = kseq->ksq_next; 1578 break; 1579 case PRI_IDLE: 1580 /* 1581 * This is for priority prop. 1582 */ 1583 if (ke->ke_thread->td_priority < PRI_MIN_IDLE) 1584 ke->ke_runq = kseq->ksq_curr; 1585 else 1586 ke->ke_runq = &kseq->ksq_idle; 1587 ke->ke_slice = SCHED_SLICE_MIN; 1588 break; 1589 default: 1590 panic("Unknown pri class."); 1591 break; 1592 } 1593#ifdef SMP 1594 if (ke->ke_cpu != PCPU_GET(cpuid)) { 1595 ke->ke_runq = NULL; 1596 kseq_notify(ke, ke->ke_cpu); 1597 return; 1598 } 1599 /* 1600 * If we had been idle, clear our bit in the group and potentially 1601 * the global bitmap. If not, see if we should transfer this thread. 1602 */ 1603 if ((class == PRI_TIMESHARE || class == PRI_REALTIME) && 1604 (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) { 1605 /* 1606 * Check to see if our group is unidling, and if so, remove it 1607 * from the global idle mask. 1608 */ 1609 if (kseq->ksq_group->ksg_idlemask == 1610 kseq->ksq_group->ksg_cpumask) 1611 atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask); 1612 /* 1613 * Now remove ourselves from the group specific idle mask. 1614 */ 1615 kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask); 1616 } else if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class)) 1617 if (kseq_transfer(kseq, ke, class)) 1618 return; 1619#endif 1620 if (td->td_priority < curthread->td_priority) 1621 curthread->td_flags |= TDF_NEEDRESCHED; 1622 1623 ke->ke_ksegrp->kg_runq_kses++; 1624 ke->ke_state = KES_ONRUNQ; 1625 1626 kseq_runq_add(kseq, ke); 1627 kseq_load_add(kseq, ke); 1628} 1629 1630void 1631sched_rem(struct thread *td) 1632{ 1633 struct kseq *kseq; 1634 struct kse *ke; 1635 1636 ke = td->td_kse; 1637 /* 1638 * It is safe to just return here because sched_rem() is only ever 1639 * used in places where we're immediately going to add the 1640 * kse back on again. In that case it'll be added with the correct 1641 * thread and priority when the caller drops the sched_lock. 1642 */ 1643 if (ke->ke_flags & KEF_ASSIGNED) 1644 return; 1645 mtx_assert(&sched_lock, MA_OWNED); 1646 KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); 1647 1648 ke->ke_state = KES_THREAD; 1649 ke->ke_ksegrp->kg_runq_kses--; 1650 kseq = KSEQ_CPU(ke->ke_cpu); 1651 kseq_runq_rem(kseq, ke); 1652 kseq_load_rem(kseq, ke); 1653} 1654 1655fixpt_t 1656sched_pctcpu(struct thread *td) 1657{ 1658 fixpt_t pctcpu; 1659 struct kse *ke; 1660 1661 pctcpu = 0; 1662 ke = td->td_kse; 1663 if (ke == NULL) 1664 return (0); 1665 1666 mtx_lock_spin(&sched_lock); 1667 if (ke->ke_ticks) { 1668 int rtick; 1669 1670 /* 1671 * Don't update more frequently than twice a second. Allowing 1672 * this causes the cpu usage to decay away too quickly due to 1673 * rounding errors. 1674 */ 1675 if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick || 1676 ke->ke_ltick < (ticks - (hz / 2))) 1677 sched_pctcpu_update(ke); 1678 /* How many rtick per second ? */ 1679 rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS); 1680 pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT; 1681 } 1682 1683 ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick; 1684 mtx_unlock_spin(&sched_lock); 1685 1686 return (pctcpu); 1687} 1688 1689void 1690sched_bind(struct thread *td, int cpu) 1691{ 1692 struct kse *ke; 1693 1694 mtx_assert(&sched_lock, MA_OWNED); 1695 ke = td->td_kse; 1696 ke->ke_flags |= KEF_BOUND; 1697#ifdef SMP 1698 if (PCPU_GET(cpuid) == cpu) 1699 return; 1700 /* sched_rem without the runq_remove */ 1701 ke->ke_state = KES_THREAD; 1702 ke->ke_ksegrp->kg_runq_kses--; 1703 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1704 kseq_notify(ke, cpu); 1705 /* When we return from mi_switch we'll be on the correct cpu. */ 1706 td->td_proc->p_stats->p_ru.ru_nvcsw++; 1707 mi_switch(); 1708#endif 1709} 1710 1711void 1712sched_unbind(struct thread *td) 1713{ 1714 mtx_assert(&sched_lock, MA_OWNED); 1715 td->td_kse->ke_flags &= ~KEF_BOUND; 1716} 1717 1718int 1719sched_sizeof_kse(void) 1720{ 1721 return (sizeof(struct kse) + sizeof(struct ke_sched)); 1722} 1723 1724int 1725sched_sizeof_ksegrp(void) 1726{ 1727 return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); 1728} 1729 1730int 1731sched_sizeof_proc(void) 1732{ 1733 return (sizeof(struct proc)); 1734} 1735 1736int 1737sched_sizeof_thread(void) 1738{ 1739 return (sizeof(struct thread) + sizeof(struct td_sched)); 1740} 1741