sched_ule.c revision 134649
/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 134649 2004-09-02 18:59:15Z scottl $");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef PREEMPTION
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel PREEMPTION is unstable under SCHED_ULE.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu
#define	ke_assign	ke_procq.tqe_next

#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
#define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */
#define	KEF_XFERABLE	KEF_SCHED2	/* KSE was added as transferable. */
#define	KEF_HOLD	KEF_SCHED3	/* KSE is temporarily bound. */

struct kg_sched {
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_max)
#define	SCHED_SLICE_NTHRESH		(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
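
/*
 * Worked example of the two mappings above (illustrative only; the exact
 * numbers depend on hz and on the platform's priority ranges).  Assuming
 * a 64-entry timeshare priority range and the hz = 1000 defaults chosen
 * in sched_setup() (slice_min = 10, slice_max = 142):
 *
 * SCHED_PRI_INTERACT(score) is score * 64 / 100, so an interactivity
 * score of 0 adds nothing to PRI_MIN_TIMESHARE while the worst score of
 * 100 adds the full range.
 *
 * SCHED_SLICE_NICE(nice) is 142 - nice * 133 / 19, where 'nice' is the
 * distance from the least nice task on the queue; each step of relative
 * nice costs 7 ticks of slice, so relative nice 0 gets 142 ticks and
 * relative nice 19 (SCHED_SLICE_NTHRESH) gets 9.
 */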

/*
 * These macros determine whether or not the kse belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int			ksq_transferable;
	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
	struct kseq_group	*ksq_group;	/* Our processor group. */
	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
#else
	int		ksq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
 * load balancer.
 */
struct kseq_group {
	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
	int	ksg_load;		/* Total load of this group. */
	int	ksg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
};
#endif

/*
 * One kse queue per processor.
 */
#ifdef SMP
static cpumask_t kseq_idle;
static int ksg_maxid;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq_group kseq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;

#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#define	KSEQ_ID(x)	((x) - kseq_cpu)
#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
#else	/* !SMP */
static struct kseq	kseq_cpu;

#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_add_internal(struct thread *td, int preemptive);
static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
static void sched_interact_fork(struct ksegrp *kg);
static void sched_pctcpu_update(struct kse *ke);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_load_add(struct kseq *kseq, struct kse *ke);
static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
static struct kse *runq_steal(struct runq *rq);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct kseq_group *ksg);
static void sched_balance_pair(struct kseq *high, struct kseq *low);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_idled(struct kseq *kseq);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq, int stealidle);

/*
 * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
 * this, we can't pin interrupts to the cpu that they were delivered to,
 * otherwise all ithreads only run on CPU 0.
 */
#ifdef __i386__
#define	KSE_CAN_MIGRATE(ke, class)					\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#else /* !__i386__ */
#define	KSE_CAN_MIGRATE(ke, class)					\
    ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
    ((ke)->ke_flags & KEF_BOUND) == 0)
#endif /* !__i386__ */
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", kseq->ksq_transferable);
#endif
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static __inline void
kseq_runq_add(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
		kseq->ksq_transferable++;
		kseq->ksq_group->ksg_transferable++;
		ke->ke_flags |= KEF_XFERABLE;
	}
#endif
	runq_add(ke->ke_runq, ke);
}

static __inline void
kseq_runq_rem(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (ke->ke_flags & KEF_XFERABLE) {
		kseq->ksq_transferable--;
		kseq->ksq_group->ksg_transferable--;
		ke->ke_flags &= ~KEF_XFERABLE;
	}
#endif
	runq_remove(ke->ke_runq, ke);
}

static void
kseq_load_add(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
	kseq->ksq_load++;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load++;
#else
		kseq->ksq_sysload++;
#endif
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_proc->p_nice);
}

static void
kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load--;
#else
		kseq->ksq_sysload--;
#endif
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}
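
/*
 * Example of the normalization above (illustrative): with PRIO_MIN == -20
 * and PRIO_MAX == 20, SCHED_PRI_NHALF is 20, so nice -20 lands in
 * ksq_nice[0], nice 0 in ksq_nice[20], and nice +20 in ksq_nice[40].
 * ksq_nicemin tracks the smallest nice value with a non-empty bin.
 */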

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm favors
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 */
static void
sched_balance(void)
{
	struct kseq_group *high;
	struct kseq_group *low;
	struct kseq_group *ksg;
	int cnt;
	int i;

	if (smp_started == 0)
		goto out;
	low = high = NULL;
	i = random() % (ksg_maxid + 1);
	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
		ksg = KSEQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || ksg->ksg_load > high->ksg_load)
		    && ksg->ksg_transferable)
			high = ksg;
		if (low == NULL || ksg->ksg_load < low->ksg_load)
			low = ksg;
		if (++i > ksg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->ksg_members),
		    LIST_FIRST(&low->ksg_members));
out:
	bal_tick = ticks + (random() % (hz * 2));
}

static void
sched_balance_groups(void)
{
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= ksg_maxid; i++)
			sched_balance_group(KSEQ_GROUP(i));
	gbal_tick = ticks + (random() % (hz * 2));
}

static void
sched_balance_group(struct kseq_group *ksg)
{
	struct kseq *kseq;
	struct kseq *high;
	struct kseq *low;
	int load;

	if (ksg->ksg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
		load = kseq->ksq_load;
		if (high == NULL || load > high->ksq_load)
			high = kseq;
		if (low == NULL || load < low->ksq_load)
			low = kseq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}
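
/*
 * A small worked example of the transfer count computed below
 * (illustrative): with high_load == 7 and low_load == 2 the imbalance
 * diff is 5, and since odd differences round up, move == 3; after three
 * kseq_move() calls the loads are 4 and 5, which is as balanced as
 * integer loads can get.
 */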

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * kseq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->ksq_group == low->ksq_group) {
		transferable = high->ksq_transferable;
		high_load = high->ksq_load;
		low_load = low->ksq_load;
	} else {
		transferable = high->ksq_group->ksg_transferable;
		high_load = high->ksq_group->ksg_load;
		low_load = low->ksq_group->ksg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * kses we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		kseq_move(high, KSEQ_ID(low));
	return;
}

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kseq *kseq;
	struct kseq *to;
	struct kse *ke;

	kseq = from;
	to = KSEQ_CPU(cpu);
	ke = kseq_steal(kseq, 1);
	if (ke == NULL) {
		struct kseq_group *ksg;

		ksg = kseq->ksq_group;
		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
			if (kseq == from || kseq->ksq_transferable == 0)
				continue;
			ke = kseq_steal(kseq, 1);
			break;
		}
		if (ke == NULL)
			panic("kseq_move: No KSEs available with a "
			    "transferable count of %d\n",
			    ksg->ksg_transferable);
	}
	if (kseq == to)
		return;
	ke->ke_state = KES_THREAD;
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
	kseq_notify(ke, cpu);
}

static int
kseq_idled(struct kseq *kseq)
{
	struct kseq_group *ksg;
	struct kseq *steal;
	struct kse *ke;

	ksg = kseq->ksq_group;
	/*
	 * If we're in a cpu group, try and steal kses from another cpu in
	 * the group before idling.
	 */
	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
			if (steal == kseq || steal->ksq_transferable == 0)
				continue;
			ke = kseq_steal(steal, 0);
			if (ke == NULL)
				continue;
			ke->ke_state = KES_THREAD;
			kseq_runq_rem(steal, ke);
			kseq_load_rem(steal, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			sched_add_internal(ke->ke_thread, 0);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a KSE bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	ksg->ksg_idlemask |= PCPU_GET(cpumask);
	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
		return (1);
	atomic_set_int(&kseq_idle, ksg->ksg_mask);
	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		*(volatile struct kse **)&ke = kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		ke->ke_flags &= ~KEF_ASSIGNED;
		sched_add_internal(ke->ke_thread, 0);
	}
}
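
/*
 * kseq_assign() above and kseq_notify() below form a simple lock-free
 * LIFO: the notifying cpu pushes a kse onto ksq_assigned with an atomic
 * compare-and-set, and the target cpu atomically swaps the whole list
 * out with NULL before walking it.  A sketch of the pattern, with
 * hypothetical names that are not part of this file:
 *
 *	push:	do { ke->next = head; } while (!cmpset(&head, ke->next, ke));
 *	pop:	do { ke = head; } while (!cmpset(&head, ke, NULL));
 */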

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;
	int prio;

	ke->ke_cpu = cpu;
	ke->ke_flags |= KEF_ASSIGNED;
	prio = ke->ke_thread->td_priority;

	kseq = KSEQ_CPU(cpu);

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	/*
	 * Without sched_lock we could lose a race where we set NEEDRESCHED
	 * on a thread that is switched out before the IPI is delivered.  This
	 * would lead us to miss the resched.  This will be a problem once
	 * sched_lock is pushed down.
	 */
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke,
				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq, int stealidle)
{
	struct kse *ke;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if (stealidle)
		return (runq_steal(&kseq->ksq_idle));
	return (NULL);
}

int
kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
{
	struct kseq_group *ksg;
	int cpu;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	/*
	 * If our load exceeds a certain threshold we should attempt to
	 * reassign this thread.  The first candidate is the cpu that
	 * originally ran the thread.  If it is idle, assign it there,
	 * otherwise, pick an idle cpu.
	 *
	 * The threshold at which we start to reassign kses has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	ksg = kseq->ksq_group;
	if (ksg->ksg_load > ksg->ksg_cpus && kseq_idle) {
		ksg = KSEQ_CPU(ke->ke_cpu)->ksq_group;
		if (kseq_idle & ksg->ksg_mask) {
			cpu = ffs(ksg->ksg_idlemask);
			if (cpu)
				goto migrate;
		}
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu)
			goto migrate;
	}
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to them after checking to see if there are idled groups.
	 */
	ksg = kseq->ksq_group;
	if (ksg->ksg_idlemask) {
		cpu = ffs(ksg->ksg_idlemask);
		if (cpu)
			goto migrate;
	}
	/*
	 * No new CPU was found.
	 */
	return (0);
migrate:
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	cpu--;
	ke->ke_runq = NULL;
	kseq_notify(ke, cpu);

	return (1);
}

#endif	/* SMP */
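
/*
 * The timeshare queues below implement the classic two-array round robin:
 * ksq_curr drains while expired tasks collect on ksq_next, and when
 * ksq_curr is empty the two pointers are simply swapped.  For example, an
 * interactive task always re-enters ksq_curr and is eligible immediately,
 * while a CPU hog requeued on ksq_next must wait for the swap before it
 * can run again.
 */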

/*
 * Pick the highest priority task we have and return it.
 */
static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int balance_groups;
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the kseqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct kseq *ksq;

		ksq = &kseq_cpu[i];
		ksq->ksq_assigned = NULL;
		kseq_setup(&kseq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct kseq_group *ksg;
		struct kseq *ksq;

		for (i = 0; i < MAXCPU; i++) {
			ksq = &kseq_cpu[i];
			ksg = &kseq_groups[i];
			/*
			 * Setup a kseq group with one member.
			 */
			ksq->ksq_transferable = 0;
			ksq->ksq_group = ksg;
			ksg->ksg_cpus = 1;
			ksg->ksg_idlemask = 0;
			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			LIST_INIT(&ksg->ksg_members);
			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
		}
	} else {
		struct kseq_group *ksg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			ksg = &kseq_groups[i];
			/*
			 * Initialize the group.
			 */
			ksg->ksg_idlemask = 0;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			ksg->ksg_cpus = cg->cg_count;
			ksg->ksg_cpumask = cg->cg_mask;
			LIST_INIT(&ksg->ksg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (ksg->ksg_mask == 0)
						ksg->ksg_mask = 1 << j;
					kseq_cpu[j].ksq_transferable = 0;
					kseq_cpu[j].ksq_group = ksg;
					LIST_INSERT_HEAD(&ksg->ksg_members,
					    &kseq_cpu[j], ksq_siblings);
				}
			}
			if (ksg->ksg_cpus > 1)
				balance_groups = 1;
		}
		ksg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_load_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}
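
/*
 * For reference (illustrative): sched_setup() above sizes the slice range
 * from hz, so at hz = 1000 slice_min is 10 ticks (10ms) and slice_max is
 * 142 ticks (~142ms); at hz = 100 they come out to 1 tick and 14 ticks.
 */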

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_proc->p_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs are given the interactive slice,
	 * which is currently defined as the maximum slice.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_proc->p_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_INTERACTIVE;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));

	return;
}
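
/*
 * Worked example for sched_interact_update() below (illustrative, at
 * hz = 1000 where SCHED_SLP_RUN_MAX is 5000 ticks scaled by 1024): a kseg
 * with runtime 4000<<10 and slptime 1500<<10 has sum 5500<<10, which is
 * over the limit but under 6/5 of it, so both are scaled by 4/5 to
 * 3200<<10 and 1200<<10; their ratio, and hence the interactivity score,
 * is preserved while older history is forgotten.
 */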

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded the limit by more than 1/5th then the 4/5
	 * scaling below will not bring us back into range.  Dividing by two
	 * here instead forces us back below SCHED_SLP_RUN_MAX, given the
	 * constraint above that the sum never exceeds double the maximum.
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can only happen when slptime and runtime are equal, e.g.
	 * when both are 0.
	 */
	return (0);
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}
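
/*
 * Example of the renormalization above (illustrative, hz = 1000 so
 * SCHED_CPU_TICKS is 10000): if a kse accumulated ke_ticks = 3000 over a
 * window that has stretched to 15000 ticks, its count is rescaled to
 * roughly 3000 * 10000 / 15000, i.e. about 2000, and the window is
 * trimmed back to the last 10000 ticks, preserving the ~20% cpu figure
 * that sched_pctcpu() reports.
 */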

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke);
		}
		/*
		 * Hold this kse on this cpu so that sched_prio() doesn't
		 * cause excessive migration.  We only want migration to
		 * happen as the result of a wakeup.
		 */
		ke->ke_flags |= KEF_HOLD;
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td, struct thread *newtd)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_pflags &= ~TDP_OWEPREEMPT;

	/*
	 * If the KSE has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
		if (td == PCPU_GET(idlethread)) {
			TD_SET_CAN_RUN(td);
		} else if (TD_IS_RUNNING(td)) {
			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
			/*
			 * Don't allow the kse to migrate from a preemption.
			 */
			ke->ke_flags |= KEF_HOLD;
			setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
		} else {
			if (ke->ke_runq) {
				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
			} else if ((td->td_flags & TDF_IDLETD) == 0)
				kdb_backtrace();
			/*
			 * We will not be on the run queue.  So we must be
			 * sleeping or similar.
			 */
			if (td->td_proc->p_flag & P_SA)
				kse_reassign(ke);
		}
	}
	if (newtd != NULL)
		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
	else
		newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_pri_class == PRI_TIMESHARE) {
			FOREACH_KSE_IN_GROUP(kg, ke) {
				if (ke->ke_runq == NULL)
					continue;
				kseq = KSEQ_CPU(ke->ke_cpu);
				kseq_nice_rem(kseq, p->p_nice);
				kseq_nice_add(kseq, nice);
			}
		}
	}
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_priority(kg);
		FOREACH_THREAD_IN_GROUP(kg, td)
			td->td_flags |= TDF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_base_pri = td->td_priority;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td, SRQ_BORING);
}
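
/*
 * Example of the sleep credit above (illustrative, hz = 1000): a thread
 * that slept for 200 ticks wakes with hzticks = 200 << 10 added to
 * kg_slptime, nudging its interactivity score toward interactive;
 * sleeping for more than 5 seconds pins the kseg at the maximally
 * interactive history of slptime = SCHED_SLP_RUN_MAX, runtime = 1.
 */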

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	p1->p_nice = td->td_proc->p_nice;
	sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct thread *td, struct kse *child)
{
	struct kse *ke = td->td_kse;

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu;
	child->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	child->ke_ticks = ke->ke_ticks;
	child->ke_ltick = ke->ke_ltick;
	child->ke_ftick = ke->ke_ftick;
}

void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
	struct ksegrp *kg = td->td_ksegrp;
	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because we could be changing to or from an interrupt
		 * class.
		 */
		if (ke->ke_state == KES_ONRUNQ) {
			if (KSE_CAN_MIGRATE(ke, oclass)) {
				kseq->ksq_transferable--;
				kseq->ksq_group->ksg_transferable--;
			}
			if (KSE_CAN_MIGRATE(ke, nclass)) {
				kseq->ksq_transferable++;
				kseq->ksq_group->ksg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare--;
			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare++;
			kseq_nice_add(kseq, kg->kg_proc->p_nice);
		}
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
}

void
sched_exit_kse(struct kse *ke, struct thread *td)
{
	kseq_load_rem(KSEQ_CPU(td->td_kse->ke_cpu), td->td_kse);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
	kg->kg_runtime += td->td_ksegrp->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}
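
/*
 * Example of the stathz/hz fixup in sched_clock() below (illustrative):
 * with hz = 1000 and stathz = 128, realstathz becomes 128 and tickincr
 * hz / realstathz = 7, so each stat clock tick charges the kseg 7 << 10
 * units of runtime; with stathz = 0 we fall back to realstathz = hz and
 * tickincr = 1.
 */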

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (ticks == bal_tick)
		sched_balance();
	if (ticks == gbal_tick)
		sched_balance_groups();
	/*
	 * We could have been assigned a non real-time thread without an
	 * IPI.
	 */
	if (kseq->ksq_assigned)
		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
#endif
	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute
	 * our interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	if (--ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_load_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_load_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned) {
		mtx_lock_spin(&sched_lock);
		kseq_assign(kseq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
restart:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_idled(kseq) == 0)
				goto restart;
#endif
		kseq_runq_rem(kseq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}
#ifdef SMP
	if (kseq_idled(kseq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td, int flags)
{

	/* let jeff work out how to map the flags better */
	/* I'm open to suggestions */
	if (flags & SRQ_YIELDING)
		/*
		 * Preempting during switching can be bad JUJU
		 * especially for KSE processes
		 */
		sched_add_internal(td, 0);
	else
		sched_add_internal(td, 1);
}

static void
sched_add_internal(struct thread *td, int preemptive)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
#ifdef SMP
	int canmigrate;
#endif
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	kseq = KSEQ_SELF();
	KASSERT((ke->ke_thread != NULL),
	    ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	class = PRI_BASE(kg->kg_pri_class);
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * Don't migrate running threads here.  Force the long term balancer
	 * to do it.
	 */
	canmigrate = KSE_CAN_MIGRATE(ke, class);
	if (ke->ke_flags & KEF_HOLD) {
		ke->ke_flags &= ~KEF_HOLD;
		canmigrate = 0;
	}
	/*
	 * If this thread is pinned or bound, notify the target cpu.
	 */
	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
		ke->ke_runq = NULL;
		kseq_notify(ke, ke->ke_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (kseq->ksq_group->ksg_idlemask ==
		    kseq->ksq_group->ksg_cpumask)
			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
	} else if (kseq->ksq_load > 1 && canmigrate)
		if (kseq_transfer(kseq, ke, class))
			return;
	ke->ke_cpu = PCPU_GET(cpuid);
#endif
	/*
	 * XXX With preemption this is not necessary.
	 */
	if (td->td_priority < curthread->td_priority &&
	    ke->ke_runq == kseq->ksq_curr)
		curthread->td_flags |= TDF_NEEDRESCHED;
	if (preemptive && maybe_preempt(td))
		return;
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	kseq_runq_add(kseq, ke);
	kseq_load_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;
	/*
	 * It is safe to just return here because sched_rem() is only ever
	 * used in places where we're immediately going to add the
	 * kse back on again.  In that case it'll be added with the correct
	 * thread and priority when the caller drops the sched_lock.
	 */
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
}
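
/*
 * Example of the fixed-point math in sched_pctcpu() below (illustrative,
 * realstathz = 128): a thread that ran half of the last SCHED_CPU_TIME
 * (10 second) window accumulated about 640 stat ticks, so rtick is
 * 640 / 10 = 64 and pctcpu is (FSCALE * ((FSCALE * 64) / 128)) >> FSHIFT,
 * which works out to FSCALE / 2, i.e. 50%.
 */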

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
		    ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rtick per second? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
	kseq_notify(ke, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= ksg_maxid; i++)
		total += KSEQ_GROUP(i)->ksg_load;
	return (total);
#else
	return (KSEQ_SELF()->ksq_sysload);
#endif
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}