sched_ule.c revision 123693
/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 123693 2003-12-20 20:36:19Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define KTR_ULE         KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callouts to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
static struct callout kseq_group_callout;
#endif

/*
 * These data structures are allocated within their parent data structure but
 * are scheduler specific.
 */

struct ke_sched {
        int             ske_slice;
        struct runq     *ske_runq;
        /* The following variables are only used for pctcpu calculation */
        int             ske_ltick;      /* Last tick that we were running on */
        int             ske_ftick;      /* First tick that we were running on */
        int             ske_ticks;      /* Tick count */
        /* CPU that we have affinity for. */
        u_char          ske_cpu;
};
#define ke_slice        ke_sched->ske_slice
#define ke_runq         ke_sched->ske_runq
#define ke_ltick        ke_sched->ske_ltick
#define ke_ftick        ke_sched->ske_ftick
#define ke_ticks        ke_sched->ske_ticks
#define ke_cpu          ke_sched->ske_cpu
#define ke_assign       ke_procq.tqe_next

#define KEF_ASSIGNED    KEF_SCHED0      /* KSE is being migrated. */
#define KEF_BOUND       KEF_SCHED1      /* KSE can not migrate. */

struct kg_sched {
        int     skg_slptime;            /* Number of ticks we vol. slept */
        int     skg_runtime;            /* Number of ticks we were running */
};
#define kg_slptime      kg_sched->skg_slptime
#define kg_runtime      kg_sched->skg_runtime

struct td_sched {
        int     std_slptime;
};
#define td_slptime      td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:   Total priority range for timeshare threads.
 * PRI_NRESV:   Number of nice values.
 * PRI_BASE:    The start of the dynamic range.
 */
#define SCHED_PRI_RANGE         (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define SCHED_PRI_NRESV         ((PRIO_MAX - PRIO_MIN) + 1)
#define SCHED_PRI_NHALF         (SCHED_PRI_NRESV / 2)
#define SCHED_PRI_BASE          (PRI_MIN_TIMESHARE)
#define SCHED_PRI_INTERACT(score)                                       \
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:         Maximum amount of sleep time + run time we'll
 *                      accumulate before throttling back.
 * SLP_RUN_FORK:        Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:        Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:     Threshold for placement on the current runq.
 */
#define SCHED_SLP_RUN_MAX       ((hz * 5) << 10)
#define SCHED_SLP_RUN_FORK      ((hz / 2) << 10)
#define SCHED_INTERACT_MAX      (100)
#define SCHED_INTERACT_HALF     (SCHED_INTERACT_MAX / 2)
#define SCHED_INTERACT_THRESH   (30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:           Minimum time slice granted, in units of ticks.
 * SLICE_MAX:           Maximum time slice granted.
 * SLICE_RANGE:         Range of available time slices scaled by hz.
 * SLICE_SCALE:         The number of slices granted per val in the range
 *                      of [0, max].
 * SLICE_NICE:          Determines the amount of slice granted to a scaled
 *                      nice value.
 * SLICE_NTHRESH:       The nice cutoff point for slice assignment.
 */
#define SCHED_SLICE_MIN                 (slice_min)
#define SCHED_SLICE_MAX                 (slice_max)
#define SCHED_SLICE_INTERACTIVE         (slice_min * 4)
#define SCHED_SLICE_NTHRESH             (SCHED_PRI_NHALF - 1)
#define SCHED_SLICE_RANGE               (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define SCHED_SLICE_SCALE(val, max)     (((val) * SCHED_SLICE_RANGE) / (max))
#define SCHED_SLICE_NICE(nice)                                          \
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 */
#define SCHED_INTERACTIVE(kg)                                           \
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define SCHED_CURR(kg, ke)                                              \
    (ke->ke_thread->td_priority != kg->kg_user_pri ||                   \
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:      Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:     Number of hz ticks to average the cpu usage across.
 */

#define SCHED_CPU_TIME  10
#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
        struct runq     ksq_idle;               /* Queue of IDLE threads. */
        struct runq     ksq_timeshare[2];       /* Run queues for !IDLE. */
        struct runq     *ksq_next;              /* Next timeshare queue. */
        struct runq     *ksq_curr;              /* Current queue. */
        int             ksq_load_timeshare;     /* Load for timeshare. */
        int             ksq_load;               /* Aggregate load. */
        short           ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
        short           ksq_nicemin;            /* Least nice. */
#ifdef SMP
        int             ksq_transferable;
        LIST_ENTRY(kseq) ksq_siblings;          /* Next in kseq group. */
        struct kseq_group *ksq_group;           /* Our processor group. */
        volatile struct kse *ksq_assigned;      /* assigned by another CPU. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not
 * NUMA.  In a NUMA environment we'd want an idle bitmap per group and a two
 * tiered load balancer.
 */
struct kseq_group {
        int     ksg_cpus;               /* Count of CPUs in this kseq group. */
        int     ksg_cpumask;            /* Mask of cpus in this group. */
        int     ksg_idlemask;           /* Idle cpus in this group. */
        int     ksg_mask;               /* Bit mask for first cpu. */
        int     ksg_load;               /* Total load of this group. */
        int     ksg_transferable;       /* Transferable load of this group. */
        LIST_HEAD(, kseq) ksg_members;  /* Linked list of all members. */
};
#endif

/*
 * One kse queue per processor.
247 */ 248#ifdef SMP 249static int kseq_idle; 250static int ksg_maxid; 251static struct kseq kseq_cpu[MAXCPU]; 252static struct kseq_group kseq_groups[MAXCPU]; 253#define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)]) 254#define KSEQ_CPU(x) (&kseq_cpu[(x)]) 255#define KSEQ_ID(x) ((x) - kseq_cpu) 256#define KSEQ_GROUP(x) (&kseq_groups[(x)]) 257#else /* !SMP */ 258static struct kseq kseq_cpu; 259#define KSEQ_SELF() (&kseq_cpu) 260#define KSEQ_CPU(x) (&kseq_cpu) 261#endif 262 263static void sched_slice(struct kse *ke); 264static void sched_priority(struct ksegrp *kg); 265static int sched_interact_score(struct ksegrp *kg); 266static void sched_interact_update(struct ksegrp *kg); 267static void sched_interact_fork(struct ksegrp *kg); 268static void sched_pctcpu_update(struct kse *ke); 269 270/* Operations on per processor queues */ 271static struct kse * kseq_choose(struct kseq *kseq); 272static void kseq_setup(struct kseq *kseq); 273static void kseq_load_add(struct kseq *kseq, struct kse *ke); 274static void kseq_load_rem(struct kseq *kseq, struct kse *ke); 275static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke); 276static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke); 277static void kseq_nice_add(struct kseq *kseq, int nice); 278static void kseq_nice_rem(struct kseq *kseq, int nice); 279void kseq_print(int cpu); 280#ifdef SMP 281static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class); 282static struct kse *runq_steal(struct runq *rq); 283static void sched_balance(void *arg); 284static void sched_balance_group(struct kseq_group *ksg); 285static void sched_balance_pair(struct kseq *high, struct kseq *low); 286static void kseq_move(struct kseq *from, int cpu); 287static int kseq_idled(struct kseq *kseq); 288static void kseq_notify(struct kse *ke, int cpu); 289static void kseq_assign(struct kseq *); 290static struct kse *kseq_steal(struct kseq *kseq, int stealidle); 291/* 292 * On P4 Xeons the round-robin interrupt delivery is broken. As a result of 293 * this, we can't pin interrupts to the cpu that they were delivered to, 294 * otherwise all ithreads only run on CPU 0. 
295 */ 296#ifdef __i386__ 297#define KSE_CAN_MIGRATE(ke, class) \ 298 ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0) 299#else /* !__i386__ */ 300#define KSE_CAN_MIGRATE(ke, class) \ 301 ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 && \ 302 ((ke)->ke_flags & KEF_BOUND) == 0) 303#endif /* !__i386__ */ 304#endif 305 306void 307kseq_print(int cpu) 308{ 309 struct kseq *kseq; 310 int i; 311 312 kseq = KSEQ_CPU(cpu); 313 314 printf("kseq:\n"); 315 printf("\tload: %d\n", kseq->ksq_load); 316 printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare); 317#ifdef SMP 318 printf("\tload transferable: %d\n", kseq->ksq_transferable); 319#endif 320 printf("\tnicemin:\t%d\n", kseq->ksq_nicemin); 321 printf("\tnice counts:\n"); 322 for (i = 0; i < SCHED_PRI_NRESV; i++) 323 if (kseq->ksq_nice[i]) 324 printf("\t\t%d = %d\n", 325 i - SCHED_PRI_NHALF, kseq->ksq_nice[i]); 326} 327 328static __inline void 329kseq_runq_add(struct kseq *kseq, struct kse *ke) 330{ 331#ifdef SMP 332 if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) { 333 kseq->ksq_transferable++; 334 kseq->ksq_group->ksg_transferable++; 335 } 336#endif 337 runq_add(ke->ke_runq, ke); 338} 339 340static __inline void 341kseq_runq_rem(struct kseq *kseq, struct kse *ke) 342{ 343#ifdef SMP 344 if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) { 345 kseq->ksq_transferable--; 346 kseq->ksq_group->ksg_transferable--; 347 } 348#endif 349 runq_remove(ke->ke_runq, ke); 350} 351 352static void 353kseq_load_add(struct kseq *kseq, struct kse *ke) 354{ 355 int class; 356 mtx_assert(&sched_lock, MA_OWNED); 357 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 358 if (class == PRI_TIMESHARE) 359 kseq->ksq_load_timeshare++; 360 kseq->ksq_load++; 361#ifdef SMP 362 if (class != PRI_ITHD) 363 kseq->ksq_group->ksg_load++; 364#endif 365 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 366 CTR6(KTR_ULE, 367 "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))", 368 ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority, 369 ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin); 370 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 371 kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice); 372} 373 374static void 375kseq_load_rem(struct kseq *kseq, struct kse *ke) 376{ 377 int class; 378 mtx_assert(&sched_lock, MA_OWNED); 379 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 380 if (class == PRI_TIMESHARE) 381 kseq->ksq_load_timeshare--; 382#ifdef SMP 383 if (class != PRI_ITHD) 384 kseq->ksq_group->ksg_load--; 385#endif 386 kseq->ksq_load--; 387 ke->ke_runq = NULL; 388 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 389 kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice); 390} 391 392static void 393kseq_nice_add(struct kseq *kseq, int nice) 394{ 395 mtx_assert(&sched_lock, MA_OWNED); 396 /* Normalize to zero. */ 397 kseq->ksq_nice[nice + SCHED_PRI_NHALF]++; 398 if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1) 399 kseq->ksq_nicemin = nice; 400} 401 402static void 403kseq_nice_rem(struct kseq *kseq, int nice) 404{ 405 int n; 406 407 mtx_assert(&sched_lock, MA_OWNED); 408 /* Normalize to zero. */ 409 n = nice + SCHED_PRI_NHALF; 410 kseq->ksq_nice[n]--; 411 KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count.")); 412 413 /* 414 * If this wasn't the smallest nice value or there are more in 415 * this bucket we can just return. Otherwise we have to recalculate 416 * the smallest nice. 
 */
        if (nice != kseq->ksq_nicemin ||
            kseq->ksq_nice[n] != 0 ||
            kseq->ksq_load_timeshare == 0)
                return;

        for (; n < SCHED_PRI_NRESV; n++)
                if (kseq->ksq_nice[n]) {
                        kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
                        return;
                }
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * algorithmic simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi-random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void *arg)
{
        struct kseq_group *high;
        struct kseq_group *low;
        struct kseq_group *ksg;
        int timo;
        int cnt;
        int i;

        mtx_lock_spin(&sched_lock);
        if (smp_started == 0)
                goto out;
        low = high = NULL;
        i = random() % (ksg_maxid + 1);
        for (cnt = 0; cnt <= ksg_maxid; cnt++) {
                ksg = KSEQ_GROUP(i);
                /*
                 * Find the CPU with the highest load that has some
                 * threads to transfer.
                 */
                if ((high == NULL || ksg->ksg_load > high->ksg_load)
                    && ksg->ksg_transferable)
                        high = ksg;
                if (low == NULL || ksg->ksg_load < low->ksg_load)
                        low = ksg;
                if (++i > ksg_maxid)
                        i = 0;
        }
        if (low != NULL && high != NULL && high != low)
                sched_balance_pair(LIST_FIRST(&high->ksg_members),
                    LIST_FIRST(&low->ksg_members));
out:
        mtx_unlock_spin(&sched_lock);
        timo = random() % (hz * 2);
        callout_reset(&kseq_lb_callout, timo, sched_balance, NULL);
}

static void
sched_balance_groups(void *arg)
{
        int timo;
        int i;

        mtx_lock_spin(&sched_lock);
        if (smp_started)
                for (i = 0; i <= ksg_maxid; i++)
                        sched_balance_group(KSEQ_GROUP(i));
        mtx_unlock_spin(&sched_lock);
        timo = random() % (hz * 2);
        callout_reset(&kseq_group_callout, timo, sched_balance_groups, NULL);
}

static void
sched_balance_group(struct kseq_group *ksg)
{
        struct kseq *kseq;
        struct kseq *high;
        struct kseq *low;
        int load;

        if (ksg->ksg_transferable == 0)
                return;
        low = NULL;
        high = NULL;
        LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
                load = kseq->ksq_load;
                if (kseq == KSEQ_CPU(0))
                        load--;
                if (high == NULL || load > high->ksq_load)
                        high = kseq;
                if (low == NULL || load < low->ksq_load)
                        low = kseq;
        }
        if (high != NULL && low != NULL && high != low)
                sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
        int transferable;
        int high_load;
        int low_load;
        int move;
        int diff;
        int i;

        /*
         * If we're transferring within a group we have to use this specific
         * kseq's transferable count, otherwise we can steal from other members
         * of the group.
         */
        if (high->ksq_group == low->ksq_group) {
                transferable = high->ksq_transferable;
                high_load = high->ksq_load;
                low_load = low->ksq_load;
                /*
                 * XXX If we encounter cpu 0 we must remember to reduce its
                 * load by 1 to reflect the swi that is running the callout.
                 * At some point we should really fix load balancing of the
                 * swi and then this won't matter.
                 */
                if (high == KSEQ_CPU(0))
                        high_load--;
                if (low == KSEQ_CPU(0))
                        low_load--;
        } else {
                transferable = high->ksq_group->ksg_transferable;
                high_load = high->ksq_group->ksg_load;
                low_load = low->ksq_group->ksg_load;
        }
        if (transferable == 0)
                return;
        /*
         * Determine what the imbalance is and then adjust that to how many
         * kses we actually have to give up (transferable).
         */
        diff = high_load - low_load;
        move = diff / 2;
        if (diff & 0x1)
                move++;
        move = min(move, transferable);
        for (i = 0; i < move; i++)
                kseq_move(high, KSEQ_ID(low));
        return;
}

static void
kseq_move(struct kseq *from, int cpu)
{
        struct kseq *kseq;
        struct kseq *to;
        struct kse *ke;

        kseq = from;
        to = KSEQ_CPU(cpu);
        ke = kseq_steal(kseq, 1);
        if (ke == NULL) {
                struct kseq_group *ksg;

                ksg = kseq->ksq_group;
                LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
                        if (kseq == from || kseq->ksq_transferable == 0)
                                continue;
                        ke = kseq_steal(kseq, 1);
                        break;
                }
                if (ke == NULL)
                        panic("kseq_move: No KSEs available with a "
                            "transferable count of %d\n",
                            ksg->ksg_transferable);
        }
        if (kseq == to)
                return;
        ke->ke_state = KES_THREAD;
        kseq_runq_rem(kseq, ke);
        kseq_load_rem(kseq, ke);
        kseq_notify(ke, cpu);
}

static int
kseq_idled(struct kseq *kseq)
{
        struct kseq_group *ksg;
        struct kseq *steal;
        struct kse *ke;

        ksg = kseq->ksq_group;
        /*
         * If we're in a cpu group, try and steal kses from another cpu in
         * the group before idling.
         */
        if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
                LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
                        if (steal == kseq || steal->ksq_transferable == 0)
                                continue;
                        ke = kseq_steal(steal, 0);
                        if (ke == NULL)
                                continue;
                        ke->ke_state = KES_THREAD;
                        kseq_runq_rem(steal, ke);
                        kseq_load_rem(steal, ke);
                        ke->ke_cpu = PCPU_GET(cpuid);
                        sched_add(ke->ke_thread);
                        return (0);
                }
        }
        /*
         * We only set the idled bit when all of the cpus in the group are
         * idle.  Otherwise we could get into a situation where a KSE bounces
         * back and forth between two idle cores on separate physical CPUs.
         */
        ksg->ksg_idlemask |= PCPU_GET(cpumask);
        if (ksg->ksg_idlemask != ksg->ksg_cpumask)
                return (1);
        atomic_set_int(&kseq_idle, ksg->ksg_mask);
        return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
        struct kse *nke;
        struct kse *ke;

        do {
                (volatile struct kse *)ke = kseq->ksq_assigned;
        } while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
        for (; ke != NULL; ke = nke) {
                nke = ke->ke_assign;
                ke->ke_flags &= ~KEF_ASSIGNED;
                sched_add(ke->ke_thread);
        }
}

static void
kseq_notify(struct kse *ke, int cpu)
{
        struct kseq *kseq;
        struct thread *td;
        struct pcpu *pcpu;

        ke->ke_cpu = cpu;
        ke->ke_flags |= KEF_ASSIGNED;

        kseq = KSEQ_CPU(cpu);

        /*
         * Place a KSE on another cpu's queue and force a resched.
677 */ 678 do { 679 (volatile struct kse *)ke->ke_assign = kseq->ksq_assigned; 680 } while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke)); 681 pcpu = pcpu_find(cpu); 682 td = pcpu->pc_curthread; 683 if (ke->ke_thread->td_priority < td->td_priority || 684 td == pcpu->pc_idlethread) { 685 td->td_flags |= TDF_NEEDRESCHED; 686 ipi_selected(1 << cpu, IPI_AST); 687 } 688} 689 690static struct kse * 691runq_steal(struct runq *rq) 692{ 693 struct rqhead *rqh; 694 struct rqbits *rqb; 695 struct kse *ke; 696 int word; 697 int bit; 698 699 mtx_assert(&sched_lock, MA_OWNED); 700 rqb = &rq->rq_status; 701 for (word = 0; word < RQB_LEN; word++) { 702 if (rqb->rqb_bits[word] == 0) 703 continue; 704 for (bit = 0; bit < RQB_BPW; bit++) { 705 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 706 continue; 707 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 708 TAILQ_FOREACH(ke, rqh, ke_procq) { 709 if (KSE_CAN_MIGRATE(ke, 710 PRI_BASE(ke->ke_ksegrp->kg_pri_class))) 711 return (ke); 712 } 713 } 714 } 715 return (NULL); 716} 717 718static struct kse * 719kseq_steal(struct kseq *kseq, int stealidle) 720{ 721 struct kse *ke; 722 723 /* 724 * Steal from next first to try to get a non-interactive task that 725 * may not have run for a while. 726 */ 727 if ((ke = runq_steal(kseq->ksq_next)) != NULL) 728 return (ke); 729 if ((ke = runq_steal(kseq->ksq_curr)) != NULL) 730 return (ke); 731 if (stealidle) 732 return (runq_steal(&kseq->ksq_idle)); 733 return (NULL); 734} 735 736int 737kseq_transfer(struct kseq *kseq, struct kse *ke, int class) 738{ 739 struct kseq_group *ksg; 740 int cpu; 741 742 if (smp_started == 0) 743 return (0); 744 cpu = 0; 745 ksg = kseq->ksq_group; 746 747 /* 748 * If there are any idle groups, give them our extra load. The 749 * threshold at which we start to reassign kses has a large impact 750 * on the overall performance of the system. Tuned too high and 751 * some CPUs may idle. Too low and there will be excess migration 752 * and context swiches. 753 */ 754 /* 755 * XXX This ksg_transferable might work better if we were checking 756 * against a global group load. As it is now, this prevents us from 757 * transfering a thread from a group that is potentially bogged down 758 * with non transferable load. 759 */ 760 if (ksg->ksg_transferable > ksg->ksg_cpus && kseq_idle) { 761 /* 762 * Multiple cpus could find this bit simultaneously 763 * but the race shouldn't be terrible. 764 */ 765 cpu = ffs(kseq_idle); 766 if (cpu) 767 atomic_clear_int(&kseq_idle, 1 << (cpu - 1)); 768 } 769 /* 770 * If another cpu in this group has idled, assign a thread over 771 * to them after checking to see if there are idled groups. 772 */ 773 if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) { 774 cpu = ffs(ksg->ksg_idlemask); 775 if (cpu) 776 ksg->ksg_idlemask &= ~(1 << (cpu - 1)); 777 } 778 /* 779 * Now that we've found an idle CPU, migrate the thread. 780 */ 781 if (cpu) { 782 cpu--; 783 ke->ke_runq = NULL; 784 kseq_notify(ke, cpu); 785 return (1); 786 } 787 return (0); 788} 789 790#endif /* SMP */ 791 792/* 793 * Pick the highest priority task we have and return it. 794 */ 795 796static struct kse * 797kseq_choose(struct kseq *kseq) 798{ 799 struct kse *ke; 800 struct runq *swap; 801 802 mtx_assert(&sched_lock, MA_OWNED); 803 swap = NULL; 804 805 for (;;) { 806 ke = runq_choose(kseq->ksq_curr); 807 if (ke == NULL) { 808 /* 809 * We already swaped once and didn't get anywhere. 
810 */ 811 if (swap) 812 break; 813 swap = kseq->ksq_curr; 814 kseq->ksq_curr = kseq->ksq_next; 815 kseq->ksq_next = swap; 816 continue; 817 } 818 /* 819 * If we encounter a slice of 0 the kse is in a 820 * TIMESHARE kse group and its nice was too far out 821 * of the range that receives slices. 822 */ 823 if (ke->ke_slice == 0) { 824 runq_remove(ke->ke_runq, ke); 825 sched_slice(ke); 826 ke->ke_runq = kseq->ksq_next; 827 runq_add(ke->ke_runq, ke); 828 continue; 829 } 830 return (ke); 831 } 832 833 return (runq_choose(&kseq->ksq_idle)); 834} 835 836static void 837kseq_setup(struct kseq *kseq) 838{ 839 runq_init(&kseq->ksq_timeshare[0]); 840 runq_init(&kseq->ksq_timeshare[1]); 841 runq_init(&kseq->ksq_idle); 842 kseq->ksq_curr = &kseq->ksq_timeshare[0]; 843 kseq->ksq_next = &kseq->ksq_timeshare[1]; 844 kseq->ksq_load = 0; 845 kseq->ksq_load_timeshare = 0; 846} 847 848static void 849sched_setup(void *dummy) 850{ 851#ifdef SMP 852 int balance_groups; 853 int i; 854#endif 855 856 slice_min = (hz/100); /* 10ms */ 857 slice_max = (hz/7); /* ~140ms */ 858 859#ifdef SMP 860 balance_groups = 0; 861 /* 862 * Initialize the kseqs. 863 */ 864 for (i = 0; i < MAXCPU; i++) { 865 struct kseq *ksq; 866 867 ksq = &kseq_cpu[i]; 868 ksq->ksq_assigned = NULL; 869 kseq_setup(&kseq_cpu[i]); 870 } 871 if (smp_topology == NULL) { 872 struct kseq_group *ksg; 873 struct kseq *ksq; 874 875 for (i = 0; i < MAXCPU; i++) { 876 ksq = &kseq_cpu[i]; 877 ksg = &kseq_groups[i]; 878 /* 879 * Setup a kse group with one member. 880 */ 881 ksq->ksq_transferable = 0; 882 ksq->ksq_group = ksg; 883 ksg->ksg_cpus = 1; 884 ksg->ksg_idlemask = 0; 885 ksg->ksg_cpumask = ksg->ksg_mask = 1 << i; 886 ksg->ksg_load = 0; 887 ksg->ksg_transferable = 0; 888 LIST_INIT(&ksg->ksg_members); 889 LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings); 890 } 891 } else { 892 struct kseq_group *ksg; 893 struct cpu_group *cg; 894 int j; 895 896 for (i = 0; i < smp_topology->ct_count; i++) { 897 cg = &smp_topology->ct_group[i]; 898 ksg = &kseq_groups[i]; 899 /* 900 * Initialize the group. 901 */ 902 ksg->ksg_idlemask = 0; 903 ksg->ksg_load = 0; 904 ksg->ksg_transferable = 0; 905 ksg->ksg_cpus = cg->cg_count; 906 ksg->ksg_cpumask = cg->cg_mask; 907 LIST_INIT(&ksg->ksg_members); 908 /* 909 * Find all of the group members and add them. 910 */ 911 for (j = 0; j < MAXCPU; j++) { 912 if ((cg->cg_mask & (1 << j)) != 0) { 913 if (ksg->ksg_mask == 0) 914 ksg->ksg_mask = 1 << j; 915 kseq_cpu[j].ksq_transferable = 0; 916 kseq_cpu[j].ksq_group = ksg; 917 LIST_INSERT_HEAD(&ksg->ksg_members, 918 &kseq_cpu[j], ksq_siblings); 919 } 920 } 921 if (ksg->ksg_cpus > 1) 922 balance_groups = 1; 923 } 924 ksg_maxid = smp_topology->ct_count - 1; 925 } 926 callout_init(&kseq_lb_callout, CALLOUT_MPSAFE); 927 callout_init(&kseq_group_callout, CALLOUT_MPSAFE); 928 sched_balance(NULL); 929 /* 930 * Stagger the group and global load balancer so they do not 931 * interfere with each other. 932 */ 933 if (balance_groups) 934 callout_reset(&kseq_group_callout, hz / 2, 935 sched_balance_groups, NULL); 936#else 937 kseq_setup(KSEQ_SELF()); 938#endif 939 mtx_lock_spin(&sched_lock); 940 kseq_load_add(KSEQ_SELF(), &kse0); 941 mtx_unlock_spin(&sched_lock); 942} 943 944/* 945 * Scale the scheduling priority according to the "interactivity" of this 946 * process. 
 */
static void
sched_priority(struct ksegrp *kg)
{
        int pri;

        if (kg->kg_pri_class != PRI_TIMESHARE)
                return;

        pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
        pri += SCHED_PRI_BASE;
        pri += kg->kg_nice;

        if (pri > PRI_MAX_TIMESHARE)
                pri = PRI_MAX_TIMESHARE;
        else if (pri < PRI_MIN_TIMESHARE)
                pri = PRI_MIN_TIMESHARE;

        kg->kg_user_pri = pri;

        return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
        struct kseq *kseq;
        struct ksegrp *kg;

        kg = ke->ke_ksegrp;
        kseq = KSEQ_CPU(ke->ke_cpu);

        /*
         * Rationale:
         * KSEs in interactive ksegs get the minimum slice so that we
         * quickly notice if they abuse their advantage.
         *
         * KSEs in non-interactive ksegs are assigned a slice that is
         * based on the kseg's nice value relative to the least nice kseg
         * on the run queue for this cpu.
         *
         * If the KSE is less nice than all others it gets the maximum
         * slice and other KSEs will adjust their slice relative to
         * this when they first expire.
         *
         * There is a 20 point window that starts relative to the least
         * nice kse on the run queue.  Slice size is determined by
         * the kse's distance from the least nice ksegrp.
         *
         * If the kse is outside of the window it will get no slice
         * and will be reevaluated each time it is selected on the
         * run queue.  The exception to this is nice 0 ksegs when
         * a nice -20 is running.  They are always granted a minimum
         * slice.
         */
        if (!SCHED_INTERACTIVE(kg)) {
                int nice;

                nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
                if (kseq->ksq_load_timeshare == 0 ||
                    kg->kg_nice < kseq->ksq_nicemin)
                        ke->ke_slice = SCHED_SLICE_MAX;
                else if (nice <= SCHED_SLICE_NTHRESH)
                        ke->ke_slice = SCHED_SLICE_NICE(nice);
                else if (kg->kg_nice == 0)
                        ke->ke_slice = SCHED_SLICE_MIN;
                else
                        ke->ke_slice = 0;
        } else
                ke->ke_slice = SCHED_SLICE_INTERACTIVE;

        CTR6(KTR_ULE,
            "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
            ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
            kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));

        return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
        int sum;

        sum = kg->kg_runtime + kg->kg_slptime;
        if (sum < SCHED_SLP_RUN_MAX)
                return;
        /*
         * If we have exceeded by more than 1/5th then the algorithm below
         * will not bring us back into range.
Dividing by two here forces 1047 * us into the range of [3/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1048 */ 1049 if (sum > (SCHED_INTERACT_MAX / 5) * 6) { 1050 kg->kg_runtime /= 2; 1051 kg->kg_slptime /= 2; 1052 return; 1053 } 1054 kg->kg_runtime = (kg->kg_runtime / 5) * 4; 1055 kg->kg_slptime = (kg->kg_slptime / 5) * 4; 1056} 1057 1058static void 1059sched_interact_fork(struct ksegrp *kg) 1060{ 1061 int ratio; 1062 int sum; 1063 1064 sum = kg->kg_runtime + kg->kg_slptime; 1065 if (sum > SCHED_SLP_RUN_FORK) { 1066 ratio = sum / SCHED_SLP_RUN_FORK; 1067 kg->kg_runtime /= ratio; 1068 kg->kg_slptime /= ratio; 1069 } 1070} 1071 1072static int 1073sched_interact_score(struct ksegrp *kg) 1074{ 1075 int div; 1076 1077 if (kg->kg_runtime > kg->kg_slptime) { 1078 div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF); 1079 return (SCHED_INTERACT_HALF + 1080 (SCHED_INTERACT_HALF - (kg->kg_slptime / div))); 1081 } if (kg->kg_slptime > kg->kg_runtime) { 1082 div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF); 1083 return (kg->kg_runtime / div); 1084 } 1085 1086 /* 1087 * This can happen if slptime and runtime are 0. 1088 */ 1089 return (0); 1090 1091} 1092 1093/* 1094 * This is only somewhat accurate since given many processes of the same 1095 * priority they will switch when their slices run out, which will be 1096 * at most SCHED_SLICE_MAX. 1097 */ 1098int 1099sched_rr_interval(void) 1100{ 1101 return (SCHED_SLICE_MAX); 1102} 1103 1104static void 1105sched_pctcpu_update(struct kse *ke) 1106{ 1107 /* 1108 * Adjust counters and watermark for pctcpu calc. 1109 */ 1110 if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) { 1111 /* 1112 * Shift the tick count out so that the divide doesn't 1113 * round away our results. 1114 */ 1115 ke->ke_ticks <<= 10; 1116 ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) * 1117 SCHED_CPU_TICKS; 1118 ke->ke_ticks >>= 10; 1119 } else 1120 ke->ke_ticks = 0; 1121 ke->ke_ltick = ticks; 1122 ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS; 1123} 1124 1125void 1126sched_prio(struct thread *td, u_char prio) 1127{ 1128 struct kse *ke; 1129 1130 ke = td->td_kse; 1131 mtx_assert(&sched_lock, MA_OWNED); 1132 if (TD_ON_RUNQ(td)) { 1133 /* 1134 * If the priority has been elevated due to priority 1135 * propagation, we may have to move ourselves to a new 1136 * queue. We still call adjustrunqueue below in case kse 1137 * needs to fix things up. 1138 */ 1139 if (prio < td->td_priority && ke && 1140 (ke->ke_flags & KEF_ASSIGNED) == 0 && 1141 ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) { 1142 runq_remove(ke->ke_runq, ke); 1143 ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr; 1144 runq_add(ke->ke_runq, ke); 1145 } 1146 adjustrunqueue(td, prio); 1147 } else 1148 td->td_priority = prio; 1149} 1150 1151void 1152sched_switch(struct thread *td) 1153{ 1154 struct thread *newtd; 1155 struct kse *ke; 1156 1157 mtx_assert(&sched_lock, MA_OWNED); 1158 1159 ke = td->td_kse; 1160 1161 td->td_last_kse = ke; 1162 td->td_lastcpu = td->td_oncpu; 1163 td->td_oncpu = NOCPU; 1164 td->td_flags &= ~TDF_NEEDRESCHED; 1165 1166 /* 1167 * If the KSE has been assigned it may be in the process of switching 1168 * to the new cpu. This is the case in sched_bind(). 
1169 */ 1170 if ((ke->ke_flags & KEF_ASSIGNED) == 0) { 1171 if (TD_IS_RUNNING(td)) { 1172 if (td->td_proc->p_flag & P_SA) { 1173 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1174 setrunqueue(td); 1175 } else 1176 kseq_runq_add(KSEQ_SELF(), ke); 1177 } else { 1178 if (ke->ke_runq) 1179 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1180 /* 1181 * We will not be on the run queue. So we must be 1182 * sleeping or similar. 1183 */ 1184 if (td->td_proc->p_flag & P_SA) 1185 kse_reassign(ke); 1186 } 1187 } 1188 newtd = choosethread(); 1189 if (td != newtd) 1190 cpu_switch(td, newtd); 1191 sched_lock.mtx_lock = (uintptr_t)td; 1192 1193 td->td_oncpu = PCPU_GET(cpuid); 1194} 1195 1196void 1197sched_nice(struct ksegrp *kg, int nice) 1198{ 1199 struct kse *ke; 1200 struct thread *td; 1201 struct kseq *kseq; 1202 1203 PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED); 1204 mtx_assert(&sched_lock, MA_OWNED); 1205 /* 1206 * We need to adjust the nice counts for running KSEs. 1207 */ 1208 if (kg->kg_pri_class == PRI_TIMESHARE) 1209 FOREACH_KSE_IN_GROUP(kg, ke) { 1210 if (ke->ke_runq == NULL) 1211 continue; 1212 kseq = KSEQ_CPU(ke->ke_cpu); 1213 kseq_nice_rem(kseq, kg->kg_nice); 1214 kseq_nice_add(kseq, nice); 1215 } 1216 kg->kg_nice = nice; 1217 sched_priority(kg); 1218 FOREACH_THREAD_IN_GROUP(kg, td) 1219 td->td_flags |= TDF_NEEDRESCHED; 1220} 1221 1222void 1223sched_sleep(struct thread *td, u_char prio) 1224{ 1225 mtx_assert(&sched_lock, MA_OWNED); 1226 1227 td->td_slptime = ticks; 1228 td->td_priority = prio; 1229 1230 CTR2(KTR_ULE, "sleep kse %p (tick: %d)", 1231 td->td_kse, td->td_slptime); 1232} 1233 1234void 1235sched_wakeup(struct thread *td) 1236{ 1237 mtx_assert(&sched_lock, MA_OWNED); 1238 1239 /* 1240 * Let the kseg know how long we slept for. This is because process 1241 * interactivity behavior is modeled in the kseg. 1242 */ 1243 if (td->td_slptime) { 1244 struct ksegrp *kg; 1245 int hzticks; 1246 1247 kg = td->td_ksegrp; 1248 hzticks = (ticks - td->td_slptime) << 10; 1249 if (hzticks >= SCHED_SLP_RUN_MAX) { 1250 kg->kg_slptime = SCHED_SLP_RUN_MAX; 1251 kg->kg_runtime = 1; 1252 } else { 1253 kg->kg_slptime += hzticks; 1254 sched_interact_update(kg); 1255 } 1256 sched_priority(kg); 1257 if (td->td_kse) 1258 sched_slice(td->td_kse); 1259 CTR2(KTR_ULE, "wakeup kse %p (%d ticks)", 1260 td->td_kse, hzticks); 1261 td->td_slptime = 0; 1262 } 1263 setrunqueue(td); 1264} 1265 1266/* 1267 * Penalize the parent for creating a new child and initialize the child's 1268 * priority. 1269 */ 1270void 1271sched_fork(struct proc *p, struct proc *p1) 1272{ 1273 1274 mtx_assert(&sched_lock, MA_OWNED); 1275 1276 sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1)); 1277 sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1)); 1278 sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1)); 1279} 1280 1281void 1282sched_fork_kse(struct kse *ke, struct kse *child) 1283{ 1284 1285 child->ke_slice = 1; /* Attempt to quickly learn interactivity. */ 1286 child->ke_cpu = ke->ke_cpu; 1287 child->ke_runq = NULL; 1288 1289 /* Grab our parents cpu estimation information. 
*/ 1290 child->ke_ticks = ke->ke_ticks; 1291 child->ke_ltick = ke->ke_ltick; 1292 child->ke_ftick = ke->ke_ftick; 1293} 1294 1295void 1296sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1297{ 1298 PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED); 1299 1300 child->kg_slptime = kg->kg_slptime; 1301 child->kg_runtime = kg->kg_runtime; 1302 child->kg_user_pri = kg->kg_user_pri; 1303 child->kg_nice = kg->kg_nice; 1304 sched_interact_fork(child); 1305 kg->kg_runtime += tickincr << 10; 1306 sched_interact_update(kg); 1307 1308 CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)", 1309 kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime, 1310 child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime); 1311} 1312 1313void 1314sched_fork_thread(struct thread *td, struct thread *child) 1315{ 1316} 1317 1318void 1319sched_class(struct ksegrp *kg, int class) 1320{ 1321 struct kseq *kseq; 1322 struct kse *ke; 1323 int nclass; 1324 int oclass; 1325 1326 mtx_assert(&sched_lock, MA_OWNED); 1327 if (kg->kg_pri_class == class) 1328 return; 1329 1330 nclass = PRI_BASE(class); 1331 oclass = PRI_BASE(kg->kg_pri_class); 1332 FOREACH_KSE_IN_GROUP(kg, ke) { 1333 if (ke->ke_state != KES_ONRUNQ && 1334 ke->ke_state != KES_THREAD) 1335 continue; 1336 kseq = KSEQ_CPU(ke->ke_cpu); 1337 1338#ifdef SMP 1339 /* 1340 * On SMP if we're on the RUNQ we must adjust the transferable 1341 * count because could be changing to or from an interrupt 1342 * class. 1343 */ 1344 if (ke->ke_state == KES_ONRUNQ) { 1345 if (KSE_CAN_MIGRATE(ke, oclass)) { 1346 kseq->ksq_transferable--; 1347 kseq->ksq_group->ksg_transferable--; 1348 } 1349 if (KSE_CAN_MIGRATE(ke, nclass)) { 1350 kseq->ksq_transferable++; 1351 kseq->ksq_group->ksg_transferable++; 1352 } 1353 } 1354#endif 1355 if (oclass == PRI_TIMESHARE) { 1356 kseq->ksq_load_timeshare--; 1357 kseq_nice_rem(kseq, kg->kg_nice); 1358 } 1359 if (nclass == PRI_TIMESHARE) { 1360 kseq->ksq_load_timeshare++; 1361 kseq_nice_add(kseq, kg->kg_nice); 1362 } 1363 } 1364 1365 kg->kg_pri_class = class; 1366} 1367 1368/* 1369 * Return some of the child's priority and interactivity to the parent. 1370 */ 1371void 1372sched_exit(struct proc *p, struct proc *child) 1373{ 1374 mtx_assert(&sched_lock, MA_OWNED); 1375 sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child)); 1376 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child)); 1377} 1378 1379void 1380sched_exit_kse(struct kse *ke, struct kse *child) 1381{ 1382 kseq_load_rem(KSEQ_CPU(child->ke_cpu), child); 1383} 1384 1385void 1386sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1387{ 1388 /* kg->kg_slptime += child->kg_slptime; */ 1389 kg->kg_runtime += child->kg_runtime; 1390 sched_interact_update(kg); 1391} 1392 1393void 1394sched_exit_thread(struct thread *td, struct thread *child) 1395{ 1396} 1397 1398void 1399sched_clock(struct thread *td) 1400{ 1401 struct kseq *kseq; 1402 struct ksegrp *kg; 1403 struct kse *ke; 1404 1405 /* 1406 * sched_setup() apparently happens prior to stathz being set. We 1407 * need to resolve the timers earlier in the boot so we can avoid 1408 * calculating this here. 1409 */ 1410 if (realstathz == 0) { 1411 realstathz = stathz ? stathz : hz; 1412 tickincr = hz / realstathz; 1413 /* 1414 * XXX This does not work for values of stathz that are much 1415 * larger than hz. 
1416 */ 1417 if (tickincr == 0) 1418 tickincr = 1; 1419 } 1420 1421 ke = td->td_kse; 1422 kg = ke->ke_ksegrp; 1423 1424 mtx_assert(&sched_lock, MA_OWNED); 1425 KASSERT((td != NULL), ("schedclock: null thread pointer")); 1426 1427 /* Adjust ticks for pctcpu */ 1428 ke->ke_ticks++; 1429 ke->ke_ltick = ticks; 1430 1431 /* Go up to one second beyond our max and then trim back down */ 1432 if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick) 1433 sched_pctcpu_update(ke); 1434 1435 if (td->td_flags & TDF_IDLETD) 1436 return; 1437 1438 CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)", 1439 ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10); 1440 /* 1441 * We only do slicing code for TIMESHARE ksegrps. 1442 */ 1443 if (kg->kg_pri_class != PRI_TIMESHARE) 1444 return; 1445 /* 1446 * We used a tick charge it to the ksegrp so that we can compute our 1447 * interactivity. 1448 */ 1449 kg->kg_runtime += tickincr << 10; 1450 sched_interact_update(kg); 1451 1452 /* 1453 * We used up one time slice. 1454 */ 1455 if (--ke->ke_slice > 0) 1456 return; 1457 /* 1458 * We're out of time, recompute priorities and requeue. 1459 */ 1460 kseq = KSEQ_SELF(); 1461 kseq_load_rem(kseq, ke); 1462 sched_priority(kg); 1463 sched_slice(ke); 1464 if (SCHED_CURR(kg, ke)) 1465 ke->ke_runq = kseq->ksq_curr; 1466 else 1467 ke->ke_runq = kseq->ksq_next; 1468 kseq_load_add(kseq, ke); 1469 td->td_flags |= TDF_NEEDRESCHED; 1470} 1471 1472int 1473sched_runnable(void) 1474{ 1475 struct kseq *kseq; 1476 int load; 1477 1478 load = 1; 1479 1480 kseq = KSEQ_SELF(); 1481#ifdef SMP 1482 if (kseq->ksq_assigned) { 1483 mtx_lock_spin(&sched_lock); 1484 kseq_assign(kseq); 1485 mtx_unlock_spin(&sched_lock); 1486 } 1487#endif 1488 if ((curthread->td_flags & TDF_IDLETD) != 0) { 1489 if (kseq->ksq_load > 0) 1490 goto out; 1491 } else 1492 if (kseq->ksq_load - 1 > 0) 1493 goto out; 1494 load = 0; 1495out: 1496 return (load); 1497} 1498 1499void 1500sched_userret(struct thread *td) 1501{ 1502 struct ksegrp *kg; 1503 1504 kg = td->td_ksegrp; 1505 1506 if (td->td_priority != kg->kg_user_pri) { 1507 mtx_lock_spin(&sched_lock); 1508 td->td_priority = kg->kg_user_pri; 1509 mtx_unlock_spin(&sched_lock); 1510 } 1511} 1512 1513struct kse * 1514sched_choose(void) 1515{ 1516 struct kseq *kseq; 1517 struct kse *ke; 1518 1519 mtx_assert(&sched_lock, MA_OWNED); 1520 kseq = KSEQ_SELF(); 1521#ifdef SMP 1522restart: 1523 if (kseq->ksq_assigned) 1524 kseq_assign(kseq); 1525#endif 1526 ke = kseq_choose(kseq); 1527 if (ke) { 1528#ifdef SMP 1529 if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) 1530 if (kseq_idled(kseq) == 0) 1531 goto restart; 1532#endif 1533 kseq_runq_rem(kseq, ke); 1534 ke->ke_state = KES_THREAD; 1535 1536 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) { 1537 CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)", 1538 ke, ke->ke_runq, ke->ke_slice, 1539 ke->ke_thread->td_priority); 1540 } 1541 return (ke); 1542 } 1543#ifdef SMP 1544 if (kseq_idled(kseq) == 0) 1545 goto restart; 1546#endif 1547 return (NULL); 1548} 1549 1550void 1551sched_add(struct thread *td) 1552{ 1553 struct kseq *kseq; 1554 struct ksegrp *kg; 1555 struct kse *ke; 1556 int class; 1557 1558 mtx_assert(&sched_lock, MA_OWNED); 1559 ke = td->td_kse; 1560 kg = td->td_ksegrp; 1561 if (ke->ke_flags & KEF_ASSIGNED) 1562 return; 1563 kseq = KSEQ_SELF(); 1564 KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE")); 1565 KASSERT((ke->ke_thread->td_kse != NULL), 1566 ("sched_add: No KSE on thread")); 1567 KASSERT(ke->ke_state != KES_ONRUNQ, 1568 
("sched_add: kse %p (%s) already in run queue", ke, 1569 ke->ke_proc->p_comm)); 1570 KASSERT(ke->ke_proc->p_sflag & PS_INMEM, 1571 ("sched_add: process swapped out")); 1572 KASSERT(ke->ke_runq == NULL, 1573 ("sched_add: KSE %p is still assigned to a run queue", ke)); 1574 1575 class = PRI_BASE(kg->kg_pri_class); 1576 switch (class) { 1577 case PRI_ITHD: 1578 case PRI_REALTIME: 1579 ke->ke_runq = kseq->ksq_curr; 1580 ke->ke_slice = SCHED_SLICE_MAX; 1581 ke->ke_cpu = PCPU_GET(cpuid); 1582 break; 1583 case PRI_TIMESHARE: 1584 if (SCHED_CURR(kg, ke)) 1585 ke->ke_runq = kseq->ksq_curr; 1586 else 1587 ke->ke_runq = kseq->ksq_next; 1588 break; 1589 case PRI_IDLE: 1590 /* 1591 * This is for priority prop. 1592 */ 1593 if (ke->ke_thread->td_priority < PRI_MIN_IDLE) 1594 ke->ke_runq = kseq->ksq_curr; 1595 else 1596 ke->ke_runq = &kseq->ksq_idle; 1597 ke->ke_slice = SCHED_SLICE_MIN; 1598 break; 1599 default: 1600 panic("Unknown pri class."); 1601 break; 1602 } 1603#ifdef SMP 1604 if (ke->ke_cpu != PCPU_GET(cpuid)) { 1605 ke->ke_runq = NULL; 1606 kseq_notify(ke, ke->ke_cpu); 1607 return; 1608 } 1609 /* 1610 * If we had been idle, clear our bit in the group and potentially 1611 * the global bitmap. If not, see if we should transfer this thread. 1612 */ 1613 if ((class == PRI_TIMESHARE || class == PRI_REALTIME) && 1614 (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) { 1615 /* 1616 * Check to see if our group is unidling, and if so, remove it 1617 * from the global idle mask. 1618 */ 1619 if (kseq->ksq_group->ksg_idlemask == 1620 kseq->ksq_group->ksg_cpumask) 1621 atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask); 1622 /* 1623 * Now remove ourselves from the group specific idle mask. 1624 */ 1625 kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask); 1626 } else if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class)) 1627 if (kseq_transfer(kseq, ke, class)) 1628 return; 1629#endif 1630 if (td->td_priority < curthread->td_priority) 1631 curthread->td_flags |= TDF_NEEDRESCHED; 1632 1633 ke->ke_ksegrp->kg_runq_kses++; 1634 ke->ke_state = KES_ONRUNQ; 1635 1636 kseq_runq_add(kseq, ke); 1637 kseq_load_add(kseq, ke); 1638} 1639 1640void 1641sched_rem(struct thread *td) 1642{ 1643 struct kseq *kseq; 1644 struct kse *ke; 1645 1646 ke = td->td_kse; 1647 /* 1648 * It is safe to just return here because sched_rem() is only ever 1649 * used in places where we're immediately going to add the 1650 * kse back on again. In that case it'll be added with the correct 1651 * thread and priority when the caller drops the sched_lock. 1652 */ 1653 if (ke->ke_flags & KEF_ASSIGNED) 1654 return; 1655 mtx_assert(&sched_lock, MA_OWNED); 1656 KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); 1657 1658 ke->ke_state = KES_THREAD; 1659 ke->ke_ksegrp->kg_runq_kses--; 1660 kseq = KSEQ_CPU(ke->ke_cpu); 1661 kseq_runq_rem(kseq, ke); 1662 kseq_load_rem(kseq, ke); 1663} 1664 1665fixpt_t 1666sched_pctcpu(struct thread *td) 1667{ 1668 fixpt_t pctcpu; 1669 struct kse *ke; 1670 1671 pctcpu = 0; 1672 ke = td->td_kse; 1673 if (ke == NULL) 1674 return (0); 1675 1676 mtx_lock_spin(&sched_lock); 1677 if (ke->ke_ticks) { 1678 int rtick; 1679 1680 /* 1681 * Don't update more frequently than twice a second. Allowing 1682 * this causes the cpu usage to decay away too quickly due to 1683 * rounding errors. 1684 */ 1685 if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick || 1686 ke->ke_ltick < (ticks - (hz / 2))) 1687 sched_pctcpu_update(ke); 1688 /* How many rtick per second ? 
*/ 1689 rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS); 1690 pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT; 1691 } 1692 1693 ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick; 1694 mtx_unlock_spin(&sched_lock); 1695 1696 return (pctcpu); 1697} 1698 1699void 1700sched_bind(struct thread *td, int cpu) 1701{ 1702 struct kse *ke; 1703 1704 mtx_assert(&sched_lock, MA_OWNED); 1705 ke = td->td_kse; 1706 ke->ke_flags |= KEF_BOUND; 1707#ifdef SMP 1708 if (PCPU_GET(cpuid) == cpu) 1709 return; 1710 /* sched_rem without the runq_remove */ 1711 ke->ke_state = KES_THREAD; 1712 ke->ke_ksegrp->kg_runq_kses--; 1713 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1714 kseq_notify(ke, cpu); 1715 /* When we return from mi_switch we'll be on the correct cpu. */ 1716 td->td_proc->p_stats->p_ru.ru_nvcsw++; 1717 mi_switch(); 1718#endif 1719} 1720 1721void 1722sched_unbind(struct thread *td) 1723{ 1724 mtx_assert(&sched_lock, MA_OWNED); 1725 td->td_kse->ke_flags &= ~KEF_BOUND; 1726} 1727 1728int 1729sched_sizeof_kse(void) 1730{ 1731 return (sizeof(struct kse) + sizeof(struct ke_sched)); 1732} 1733 1734int 1735sched_sizeof_ksegrp(void) 1736{ 1737 return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); 1738} 1739 1740int 1741sched_sizeof_proc(void) 1742{ 1743 return (sizeof(struct proc)); 1744} 1745 1746int 1747sched_sizeof_thread(void) 1748{ 1749 return (sizeof(struct thread) + sizeof(struct td_sched)); 1750} 1751
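
The arithmetic behind sched_interact_score() and SCHED_PRI_INTERACT() above is compact but easy to misread. The following standalone userland sketch is not part of sched_ule.c; it mirrors that arithmetic so the mapping from sleep/run history to a timeshare user priority can be printed and inspected. The PRI_MIN_TS/PRI_MAX_TS values are assumed timeshare bounds from kernel headers of this era, the helper names are made up for the example, and small percentages stand in for the kernel's <<10 tick counts (only the ratio matters here).

/*
 * Illustrative sketch of ULE's interactivity scoring (assumptions noted
 * above); compile with any C compiler and run to see score -> priority.
 */
#include <stdio.h>

#define INTERACT_MAX    100
#define INTERACT_HALF   (INTERACT_MAX / 2)
#define PRI_MIN_TS      160                     /* assumed PRI_MIN_TIMESHARE */
#define PRI_MAX_TS      223                     /* assumed PRI_MAX_TIMESHARE */
#define PRI_TS_RANGE    (PRI_MAX_TS - PRI_MIN_TS + 1)

/* Same shape as sched_interact_score(): more sleep than run gives a low
 * (interactive) score, more run than sleep gives a high score. */
static int
interact_score(int runtime, int slptime)
{
        int div;

        if (runtime > slptime) {
                div = runtime / INTERACT_HALF;
                if (div < 1)
                        div = 1;
                return (INTERACT_HALF + (INTERACT_HALF - slptime / div));
        }
        if (slptime > runtime) {
                div = slptime / INTERACT_HALF;
                if (div < 1)
                        div = 1;
                return (runtime / div);
        }
        return (0);             /* Equal (or empty) history. */
}

int
main(void)
{
        int runs[] = { 10, 60, 90 };    /* percent of history spent running */
        int i, score, pri;

        for (i = 0; i < 3; i++) {
                score = interact_score(runs[i], 100 - runs[i]);
                /* SCHED_PRI_INTERACT() plus SCHED_PRI_BASE, nice 0. */
                pri = PRI_MIN_TS + score * PRI_TS_RANGE / INTERACT_MAX;
                printf("run %2d%% sleep %2d%% -> score %3d, user pri %d\n",
                    runs[i], 100 - runs[i], score, pri);
        }
        return (0);
}

A mostly-sleeping task (10% run) scores 10, below SCHED_INTERACT_THRESH (30), so it is treated as interactive and lands near the top of the timeshare range; a cpu hog (90% run) scores 90 and is pushed toward the bottom, which is the behavior the priority and slice comments above describe.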