/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 124944 2004-01-25 03:54:52Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callouts to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
static struct callout kseq_group_callout;
#endif

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu
#define	ke_assign	ke_procq.tqe_next

#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
#define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */

struct kg_sched {
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score) \
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_min * 4)
#define	SCHED_SLICE_NTHRESH		(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice) \
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg) \
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke) \
    (ke->ke_thread->td_priority != kg->kg_user_pri || \
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int		ksq_transferable;
	LIST_ENTRY(kseq) ksq_siblings;		/* Next in kseq group. */
	struct kseq_group *ksq_group;		/* Our processor group. */
	volatile struct kse *ksq_assigned;	/* assigned by another CPU. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
 * load balancer.
 */
struct kseq_group {
	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
	int	ksg_cpumask;		/* Mask of cpus in this group. */
	int	ksg_idlemask;		/* Idle cpus in this group. */
	int	ksg_mask;		/* Bit mask for first cpu. */
	int	ksg_load;		/* Total load of this group. */
	int	ksg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, kseq) ksg_members;	/* Linked list of all members. */
};
#endif

/*
 * One kse queue per processor.
 */
#ifdef SMP
static int kseq_idle;
static int ksg_maxid;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq_group kseq_groups[MAXCPU];
#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#define	KSEQ_ID(x)	((x) - kseq_cpu)
#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
#else	/* !SMP */
static struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
static void sched_interact_fork(struct ksegrp *kg);
static void sched_pctcpu_update(struct kse *ke);

/* Operations on per processor queues */
static struct kse *kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_load_add(struct kseq *kseq, struct kse *ke);
static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
static struct kse *runq_steal(struct runq *rq);
static void sched_balance(void *arg);
static void sched_balance_group(struct kseq_group *ksg);
static void sched_balance_pair(struct kseq *high, struct kseq *low);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_idled(struct kseq *kseq);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
/*
 * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
 * this, we can't pin interrupts to the cpu that they were delivered to,
 * otherwise all ithreads only run on CPU 0.
 */
#ifdef __i386__
#define	KSE_CAN_MIGRATE(ke, class) \
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#else /* !__i386__ */
#define	KSE_CAN_MIGRATE(ke, class) \
    ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 && \
    ((ke)->ke_flags & KEF_BOUND) == 0)
#endif /* !__i386__ */
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", kseq->ksq_transferable);
#endif
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static __inline void
kseq_runq_add(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
		kseq->ksq_transferable++;
		kseq->ksq_group->ksg_transferable++;
	}
#endif
	runq_add(ke->ke_runq, ke);
}

static __inline void
kseq_runq_rem(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
		kseq->ksq_transferable--;
		kseq->ksq_group->ksg_transferable--;
	}
#endif
	runq_remove(ke->ke_runq, ke);
}

static void
kseq_load_add(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
	kseq->ksq_load++;
#ifdef SMP
	if (class != PRI_ITHD)
		kseq->ksq_group->ksg_load++;
#endif
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
}

static void
kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
#ifdef SMP
	if (class != PRI_ITHD)
		kseq->ksq_group->ksg_load--;
#endif
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void *arg)
{
	struct kseq_group *high;
	struct kseq_group *low;
	struct kseq_group *ksg;
	int timo;
	int cnt;
	int i;

	mtx_lock_spin(&sched_lock);
	if (smp_started == 0)
		goto out;
	low = high = NULL;
	i = random() % (ksg_maxid + 1);
	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
		ksg = KSEQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || ksg->ksg_load > high->ksg_load)
		    && ksg->ksg_transferable)
			high = ksg;
		if (low == NULL || ksg->ksg_load < low->ksg_load)
			low = ksg;
		if (++i > ksg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->ksg_members),
		    LIST_FIRST(&low->ksg_members));
out:
	mtx_unlock_spin(&sched_lock);
	timo = random() % (hz * 2);
	callout_reset(&kseq_lb_callout, timo, sched_balance, NULL);
}

static void
sched_balance_groups(void *arg)
{
	int timo;
	int i;

	mtx_lock_spin(&sched_lock);
	if (smp_started)
		for (i = 0; i <= ksg_maxid; i++)
			sched_balance_group(KSEQ_GROUP(i));
	mtx_unlock_spin(&sched_lock);
	timo = random() % (hz * 2);
	callout_reset(&kseq_group_callout, timo, sched_balance_groups, NULL);
}

static void
sched_balance_group(struct kseq_group *ksg)
{
	struct kseq *kseq;
	struct kseq *high;
	struct kseq *low;
	int load;

	if (ksg->ksg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
		load = kseq->ksq_load;
		if (kseq == KSEQ_CPU(0))
			load--;
		if (high == NULL || load > high->ksq_load)
			high = kseq;
		if (low == NULL || load < low->ksq_load)
			low = kseq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * kseq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->ksq_group == low->ksq_group) {
		transferable = high->ksq_transferable;
		high_load = high->ksq_load;
		low_load = low->ksq_load;
		/*
		 * XXX If we encounter cpu 0 we must remember to reduce its
		 * load by 1 to reflect the swi that is running the callout.
		 * At some point we should really fix load balancing of the
		 * swi and then this won't matter.
		 */
		if (high == KSEQ_CPU(0))
			high_load--;
		if (low == KSEQ_CPU(0))
			low_load--;
	} else {
		transferable = high->ksq_group->ksg_transferable;
		high_load = high->ksq_group->ksg_load;
		low_load = low->ksq_group->ksg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * kses we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		kseq_move(high, KSEQ_ID(low));
	return;
}

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kseq *kseq;
	struct kseq *to;
	struct kse *ke;

	kseq = from;
	to = KSEQ_CPU(cpu);
	ke = kseq_steal(kseq, 1);
	if (ke == NULL) {
		struct kseq_group *ksg;

		ksg = kseq->ksq_group;
		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
			if (kseq == from || kseq->ksq_transferable == 0)
				continue;
			ke = kseq_steal(kseq, 1);
			break;
		}
		if (ke == NULL)
			panic("kseq_move: No KSEs available with a "
			    "transferable count of %d\n",
			    ksg->ksg_transferable);
	}
	if (kseq == to)
		return;
	ke->ke_state = KES_THREAD;
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
	kseq_notify(ke, cpu);
}

static int
kseq_idled(struct kseq *kseq)
{
	struct kseq_group *ksg;
	struct kseq *steal;
	struct kse *ke;

	ksg = kseq->ksq_group;
	/*
	 * If we're in a cpu group, try and steal kses from another cpu in
	 * the group before idling.
	 */
	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
			if (steal == kseq || steal->ksq_transferable == 0)
				continue;
			ke = kseq_steal(steal, 0);
			if (ke == NULL)
				continue;
			ke->ke_state = KES_THREAD;
			kseq_runq_rem(steal, ke);
			kseq_load_rem(steal, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			sched_add(ke->ke_thread);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a KSE bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	ksg->ksg_idlemask |= PCPU_GET(cpumask);
	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
		return (1);
	atomic_set_int(&kseq_idle, ksg->ksg_mask);
	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		(volatile struct kse *)ke = kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		ke->ke_flags &= ~KEF_ASSIGNED;
		sched_add(ke->ke_thread);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;

	ke->ke_cpu = cpu;
	ke->ke_flags |= KEF_ASSIGNED;

	kseq = KSEQ_CPU(cpu);

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		(volatile struct kse *)ke->ke_assign = kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke,
				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq, int stealidle)
{
	struct kse *ke;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if (stealidle)
		return (runq_steal(&kseq->ksq_idle));
	return (NULL);
}

int
kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
{
	struct kseq_group *ksg;
	int cpu;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	ksg = kseq->ksq_group;

	/*
	 * If there are any idle groups, give them our extra load.  The
	 * threshold at which we start to reassign kses has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	if (ksg->ksg_load > (ksg->ksg_cpus * 2) && kseq_idle) {
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu)
			atomic_clear_int(&kseq_idle, 1 << (cpu - 1));
	}
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to them after checking to see if there are idled groups.
	 */
	if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) {
		cpu = ffs(ksg->ksg_idlemask);
		if (cpu)
			ksg->ksg_idlemask &= ~(1 << (cpu - 1));
	}
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	if (cpu) {
		cpu--;
		ke->ke_runq = NULL;
		kseq_notify(ke, cpu);
		return (1);
	}
	return (0);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int balance_groups;
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the kseqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct kseq *ksq;

		ksq = &kseq_cpu[i];
		ksq->ksq_assigned = NULL;
		kseq_setup(&kseq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct kseq_group *ksg;
		struct kseq *ksq;

		for (i = 0; i < MAXCPU; i++) {
			ksq = &kseq_cpu[i];
			ksg = &kseq_groups[i];
			/*
			 * Setup a kse group with one member.
			 */
			ksq->ksq_transferable = 0;
			ksq->ksq_group = ksg;
			ksg->ksg_cpus = 1;
			ksg->ksg_idlemask = 0;
			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			LIST_INIT(&ksg->ksg_members);
			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
		}
	} else {
		struct kseq_group *ksg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			ksg = &kseq_groups[i];
			/*
			 * Initialize the group.
			 */
			ksg->ksg_idlemask = 0;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			ksg->ksg_cpus = cg->cg_count;
			ksg->ksg_cpumask = cg->cg_mask;
			LIST_INIT(&ksg->ksg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (ksg->ksg_mask == 0)
						ksg->ksg_mask = 1 << j;
					kseq_cpu[j].ksq_transferable = 0;
					kseq_cpu[j].ksq_group = ksg;
					LIST_INSERT_HEAD(&ksg->ksg_members,
					    &kseq_cpu[j], ksq_siblings);
				}
			}
			if (ksg->ksg_cpus > 1)
				balance_groups = 1;
		}
		ksg_maxid = smp_topology->ct_count - 1;
	}
	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
	callout_init(&kseq_group_callout, CALLOUT_MPSAFE);
	sched_balance(NULL);
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	if (balance_groups)
		callout_reset(&kseq_group_callout, hz / 2,
		    sched_balance_groups, NULL);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_load_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the ksegrp and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the ksegs nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_INTERACTIVE;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke);
		}
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td)
{
	struct thread *newtd;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	/*
	 * If the KSE has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
		if (TD_IS_RUNNING(td)) {
			if (td->td_proc->p_flag & P_SA) {
				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
				setrunqueue(td);
			} else
				kseq_runq_add(KSEQ_SELF(), ke);
		} else {
			if (ke->ke_runq)
				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
			/*
			 * We will not be on the run queue.  So we must be
			 * sleeping or similar.
			 */
			if (td->td_proc->p_flag & P_SA)
				kse_reassign(ke);
		}
	}
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_runq == NULL)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu;
	child->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	child->ke_ticks = ke->ke_ticks;
	child->ke_ltick = ke->ke_ltick;
	child->ke_ftick = ke->ke_ftick;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because we could be changing to or from an interrupt
		 * class.
		 */
		if (ke->ke_state == KES_ONRUNQ) {
			if (KSE_CAN_MIGRATE(ke, oclass)) {
				kseq->ksq_transferable--;
				kseq->ksq_group->ksg_transferable--;
			}
			if (KSE_CAN_MIGRATE(ke, nclass)) {
				kseq->ksq_transferable++;
				kseq->ksq_group->ksg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare--;
			kseq_nice_rem(kseq, kg->kg_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare++;
			kseq_nice_add(kseq, kg->kg_nice);
		}
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_load_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* kg->kg_slptime += child->kg_slptime; */
	kg->kg_runtime += child->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	if (--ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq = KSEQ_SELF();
	kseq_load_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_load_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned) {
		mtx_lock_spin(&sched_lock);
		kseq_assign(kseq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
restart:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_idled(kseq) == 0)
				goto restart;
#endif
		kseq_runq_rem(kseq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}
#ifdef SMP
	if (kseq_idled(kseq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	kseq = KSEQ_SELF();
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	class = PRI_BASE(kg->kg_pri_class);
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	if (ke->ke_cpu != PCPU_GET(cpuid)) {
		ke->ke_runq = NULL;
		kseq_notify(ke, ke->ke_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (kseq->ksq_group->ksg_idlemask ==
		    kseq->ksq_group->ksg_cpumask)
			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
	} else if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class))
		if (kseq_transfer(kseq, ke, class))
			return;
#endif
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	kseq_runq_add(kseq, ke);
	kseq_load_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;
	/*
	 * It is safe to just return here because sched_rem() is only ever
	 * used in places where we're immediately going to add the
	 * kse back on again.  In that case it'll be added with the correct
	 * thread and priority when the caller drops the sched_lock.
	 */
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
		    ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rtick per second ? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
	kseq_notify(ke, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}