sched_ule.c revision 148383
/*-
 * Copyright (c) 2002-2005, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 148383 2005-07-25 10:21:49Z delphij $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#define kse td_sched

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * The following data structures are allocated within their parent structure
 * but are scheduler specific.
 */
/*
 * The schedulable entity that can be given a context to run.  A process may
 * have several of these.
 */
struct kse {
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_THREAD = 0x0,	/* slaved to thread state */
		KES_ONRUNQ
	} ke_state;			/* (j) thread sched specific status. */
	int		ke_slptime;
	int		ke_slice;
	struct runq	*ke_runq;
	u_char		ke_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ke_ltick;	/* Last tick that we were running on */
	int		ke_ftick;	/* First tick that we were running on */
	int		ke_ticks;	/* Tick count */

};
#define	td_kse			td_sched
#define	td_slptime		td_kse->ke_slptime
#define	ke_proc			ke_thread->td_proc
#define	ke_ksegrp		ke_thread->td_ksegrp
#define	ke_assign		ke_procq.tqe_next
/* flags kept in ke_flags */
#define	KEF_ASSIGNED	0x0001		/* Thread is being migrated. */
#define	KEF_BOUND	0x0002		/* Thread can not migrate. */
#define	KEF_XFERABLE	0x0004		/* Thread was added as transferable. */
#define	KEF_HOLD	0x0008		/* Thread is temporarily bound. */
#define	KEF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
#define	KEF_INTERNAL	0x0020		/* Thread added due to migration. */
#define	KEF_DIDRUN	0x02000		/* Thread actually ran. */
#define	KEF_EXIT	0x04000		/* Thread is being killed. */

struct kg_sched {
	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
					    /* the system scheduler */
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
	int	skg_avail_opennings;	/* (j) Num unfilled slots in group.*/
	int	skg_concurrency;	/* (j) Num threads requested in group.*/
};
#define	kg_last_assigned	kg_sched->skg_last_assigned
#define	kg_avail_opennings	kg_sched->skg_avail_opennings
#define	kg_concurrency		kg_sched->skg_concurrency
#define	kg_runtime		kg_sched->skg_runtime
#define	kg_slptime		kg_sched->skg_slptime

#define	SLOT_RELEASE(kg)	(kg)->kg_avail_opennings++
#define	SLOT_USE(kg)		(kg)->kg_avail_opennings--

static struct kse kse0;
static struct kg_sched kg_sched0;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
184 * 185 * SLICE_MIN: Minimum time slice granted, in units of ticks. 186 * SLICE_MAX: Maximum time slice granted. 187 * SLICE_RANGE: Range of available time slices scaled by hz. 188 * SLICE_SCALE: The number slices granted per val in the range of [0, max]. 189 * SLICE_NICE: Determine the amount of slice granted to a scaled nice. 190 * SLICE_NTHRESH: The nice cutoff point for slice assignment. 191 */ 192#define SCHED_SLICE_MIN (slice_min) 193#define SCHED_SLICE_MAX (slice_max) 194#define SCHED_SLICE_INTERACTIVE (slice_max) 195#define SCHED_SLICE_NTHRESH (SCHED_PRI_NHALF - 1) 196#define SCHED_SLICE_RANGE (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1) 197#define SCHED_SLICE_SCALE(val, max) (((val) * SCHED_SLICE_RANGE) / (max)) 198#define SCHED_SLICE_NICE(nice) \ 199 (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH)) 200 201/* 202 * This macro determines whether or not the thread belongs on the current or 203 * next run queue. 204 */ 205#define SCHED_INTERACTIVE(kg) \ 206 (sched_interact_score(kg) < SCHED_INTERACT_THRESH) 207#define SCHED_CURR(kg, ke) \ 208 ((ke->ke_thread->td_flags & TDF_BORROWING) || SCHED_INTERACTIVE(kg)) 209 210/* 211 * Cpu percentage computation macros and defines. 212 * 213 * SCHED_CPU_TIME: Number of seconds to average the cpu usage across. 214 * SCHED_CPU_TICKS: Number of hz ticks to average the cpu usage across. 215 */ 216 217#define SCHED_CPU_TIME 10 218#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME) 219 220/* 221 * kseq - per processor runqs and statistics. 222 */ 223struct kseq { 224 struct runq ksq_idle; /* Queue of IDLE threads. */ 225 struct runq ksq_timeshare[2]; /* Run queues for !IDLE. */ 226 struct runq *ksq_next; /* Next timeshare queue. */ 227 struct runq *ksq_curr; /* Current queue. */ 228 int ksq_load_timeshare; /* Load for timeshare. */ 229 int ksq_load; /* Aggregate load. */ 230 short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */ 231 short ksq_nicemin; /* Least nice. */ 232#ifdef SMP 233 int ksq_transferable; 234 LIST_ENTRY(kseq) ksq_siblings; /* Next in kseq group. */ 235 struct kseq_group *ksq_group; /* Our processor group. */ 236 volatile struct kse *ksq_assigned; /* assigned by another CPU. */ 237#else 238 int ksq_sysload; /* For loadavg, !ITHD load. */ 239#endif 240}; 241 242#ifdef SMP 243/* 244 * kseq groups are groups of processors which can cheaply share threads. When 245 * one processor in the group goes idle it will check the runqs of the other 246 * processors in its group prior to halting and waiting for an interrupt. 247 * These groups are suitable for SMT (Symetric Multi-Threading) and not NUMA. 248 * In a numa environment we'd want an idle bitmap per group and a two tiered 249 * load balancer. 250 */ 251struct kseq_group { 252 int ksg_cpus; /* Count of CPUs in this kseq group. */ 253 cpumask_t ksg_cpumask; /* Mask of cpus in this group. */ 254 cpumask_t ksg_idlemask; /* Idle cpus in this group. */ 255 cpumask_t ksg_mask; /* Bit mask for first cpu. */ 256 int ksg_load; /* Total load of this group. */ 257 int ksg_transferable; /* Transferable load of this group. */ 258 LIST_HEAD(, kseq) ksg_members; /* Linked list of all members. */ 259}; 260#endif 261 262/* 263 * One kse queue per processor. 
264 */ 265#ifdef SMP 266static cpumask_t kseq_idle; 267static int ksg_maxid; 268static struct kseq kseq_cpu[MAXCPU]; 269static struct kseq_group kseq_groups[MAXCPU]; 270static int bal_tick; 271static int gbal_tick; 272static int balance_groups; 273 274#define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)]) 275#define KSEQ_CPU(x) (&kseq_cpu[(x)]) 276#define KSEQ_ID(x) ((x) - kseq_cpu) 277#define KSEQ_GROUP(x) (&kseq_groups[(x)]) 278#else /* !SMP */ 279static struct kseq kseq_cpu; 280 281#define KSEQ_SELF() (&kseq_cpu) 282#define KSEQ_CPU(x) (&kseq_cpu) 283#endif 284 285static void slot_fill(struct ksegrp *); 286static struct kse *sched_choose(void); /* XXX Should be thread * */ 287static void sched_slice(struct kse *); 288static void sched_priority(struct ksegrp *); 289static void sched_thread_priority(struct thread *, u_char); 290static int sched_interact_score(struct ksegrp *); 291static void sched_interact_update(struct ksegrp *); 292static void sched_interact_fork(struct ksegrp *); 293static void sched_pctcpu_update(struct kse *); 294 295/* Operations on per processor queues */ 296static struct kse * kseq_choose(struct kseq *); 297static void kseq_setup(struct kseq *); 298static void kseq_load_add(struct kseq *, struct kse *); 299static void kseq_load_rem(struct kseq *, struct kse *); 300static __inline void kseq_runq_add(struct kseq *, struct kse *, int); 301static __inline void kseq_runq_rem(struct kseq *, struct kse *); 302static void kseq_nice_add(struct kseq *, int); 303static void kseq_nice_rem(struct kseq *, int); 304void kseq_print(int cpu); 305#ifdef SMP 306static int kseq_transfer(struct kseq *, struct kse *, int); 307static struct kse *runq_steal(struct runq *); 308static void sched_balance(void); 309static void sched_balance_groups(void); 310static void sched_balance_group(struct kseq_group *); 311static void sched_balance_pair(struct kseq *, struct kseq *); 312static void kseq_move(struct kseq *, int); 313static int kseq_idled(struct kseq *); 314static void kseq_notify(struct kse *, int); 315static void kseq_assign(struct kseq *); 316static struct kse *kseq_steal(struct kseq *, int); 317#define KSE_CAN_MIGRATE(ke) \ 318 ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0) 319#endif 320 321void 322kseq_print(int cpu) 323{ 324 struct kseq *kseq; 325 int i; 326 327 kseq = KSEQ_CPU(cpu); 328 329 printf("kseq:\n"); 330 printf("\tload: %d\n", kseq->ksq_load); 331 printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare); 332#ifdef SMP 333 printf("\tload transferable: %d\n", kseq->ksq_transferable); 334#endif 335 printf("\tnicemin:\t%d\n", kseq->ksq_nicemin); 336 printf("\tnice counts:\n"); 337 for (i = 0; i < SCHED_PRI_NRESV; i++) 338 if (kseq->ksq_nice[i]) 339 printf("\t\t%d = %d\n", 340 i - SCHED_PRI_NHALF, kseq->ksq_nice[i]); 341} 342 343static __inline void 344kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags) 345{ 346#ifdef SMP 347 if (KSE_CAN_MIGRATE(ke)) { 348 kseq->ksq_transferable++; 349 kseq->ksq_group->ksg_transferable++; 350 ke->ke_flags |= KEF_XFERABLE; 351 } 352#endif 353 runq_add(ke->ke_runq, ke, flags); 354} 355 356static __inline void 357kseq_runq_rem(struct kseq *kseq, struct kse *ke) 358{ 359#ifdef SMP 360 if (ke->ke_flags & KEF_XFERABLE) { 361 kseq->ksq_transferable--; 362 kseq->ksq_group->ksg_transferable--; 363 ke->ke_flags &= ~KEF_XFERABLE; 364 } 365#endif 366 runq_remove(ke->ke_runq, ke); 367} 368 369static void 370kseq_load_add(struct kseq *kseq, struct kse *ke) 371{ 372 int class; 373 mtx_assert(&sched_lock, MA_OWNED); 374 class = 
PRI_BASE(ke->ke_ksegrp->kg_pri_class); 375 if (class == PRI_TIMESHARE) 376 kseq->ksq_load_timeshare++; 377 kseq->ksq_load++; 378 CTR1(KTR_SCHED, "load: %d", kseq->ksq_load); 379 if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0) 380#ifdef SMP 381 kseq->ksq_group->ksg_load++; 382#else 383 kseq->ksq_sysload++; 384#endif 385 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 386 kseq_nice_add(kseq, ke->ke_proc->p_nice); 387} 388 389static void 390kseq_load_rem(struct kseq *kseq, struct kse *ke) 391{ 392 int class; 393 mtx_assert(&sched_lock, MA_OWNED); 394 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 395 if (class == PRI_TIMESHARE) 396 kseq->ksq_load_timeshare--; 397 if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0) 398#ifdef SMP 399 kseq->ksq_group->ksg_load--; 400#else 401 kseq->ksq_sysload--; 402#endif 403 kseq->ksq_load--; 404 CTR1(KTR_SCHED, "load: %d", kseq->ksq_load); 405 ke->ke_runq = NULL; 406 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 407 kseq_nice_rem(kseq, ke->ke_proc->p_nice); 408} 409 410static void 411kseq_nice_add(struct kseq *kseq, int nice) 412{ 413 mtx_assert(&sched_lock, MA_OWNED); 414 /* Normalize to zero. */ 415 kseq->ksq_nice[nice + SCHED_PRI_NHALF]++; 416 if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1) 417 kseq->ksq_nicemin = nice; 418} 419 420static void 421kseq_nice_rem(struct kseq *kseq, int nice) 422{ 423 int n; 424 425 mtx_assert(&sched_lock, MA_OWNED); 426 /* Normalize to zero. */ 427 n = nice + SCHED_PRI_NHALF; 428 kseq->ksq_nice[n]--; 429 KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count.")); 430 431 /* 432 * If this wasn't the smallest nice value or there are more in 433 * this bucket we can just return. Otherwise we have to recalculate 434 * the smallest nice. 435 */ 436 if (nice != kseq->ksq_nicemin || 437 kseq->ksq_nice[n] != 0 || 438 kseq->ksq_load_timeshare == 0) 439 return; 440 441 for (; n < SCHED_PRI_NRESV; n++) 442 if (kseq->ksq_nice[n]) { 443 kseq->ksq_nicemin = n - SCHED_PRI_NHALF; 444 return; 445 } 446} 447 448#ifdef SMP 449/* 450 * sched_balance is a simple CPU load balancing algorithm. It operates by 451 * finding the least loaded and most loaded cpu and equalizing their load 452 * by migrating some processes. 453 * 454 * Dealing only with two CPUs at a time has two advantages. Firstly, most 455 * installations will only have 2 cpus. Secondly, load balancing too much at 456 * once can have an unpleasant effect on the system. The scheduler rarely has 457 * enough information to make perfect decisions. So this algorithm chooses 458 * algorithm simplicity and more gradual effects on load in larger systems. 459 * 460 * It could be improved by considering the priorities and slices assigned to 461 * each task prior to balancing them. There are many pathological cases with 462 * any approach and so the semi random algorithm below may work as well as any. 463 * 464 */ 465static void 466sched_balance(void) 467{ 468 struct kseq_group *high; 469 struct kseq_group *low; 470 struct kseq_group *ksg; 471 int cnt; 472 int i; 473 474 bal_tick = ticks + (random() % (hz * 2)); 475 if (smp_started == 0) 476 return; 477 low = high = NULL; 478 i = random() % (ksg_maxid + 1); 479 for (cnt = 0; cnt <= ksg_maxid; cnt++) { 480 ksg = KSEQ_GROUP(i); 481 /* 482 * Find the CPU with the highest load that has some 483 * threads to transfer. 
484 */ 485 if ((high == NULL || ksg->ksg_load > high->ksg_load) 486 && ksg->ksg_transferable) 487 high = ksg; 488 if (low == NULL || ksg->ksg_load < low->ksg_load) 489 low = ksg; 490 if (++i > ksg_maxid) 491 i = 0; 492 } 493 if (low != NULL && high != NULL && high != low) 494 sched_balance_pair(LIST_FIRST(&high->ksg_members), 495 LIST_FIRST(&low->ksg_members)); 496} 497 498static void 499sched_balance_groups(void) 500{ 501 int i; 502 503 gbal_tick = ticks + (random() % (hz * 2)); 504 mtx_assert(&sched_lock, MA_OWNED); 505 if (smp_started) 506 for (i = 0; i <= ksg_maxid; i++) 507 sched_balance_group(KSEQ_GROUP(i)); 508} 509 510static void 511sched_balance_group(struct kseq_group *ksg) 512{ 513 struct kseq *kseq; 514 struct kseq *high; 515 struct kseq *low; 516 int load; 517 518 if (ksg->ksg_transferable == 0) 519 return; 520 low = NULL; 521 high = NULL; 522 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { 523 load = kseq->ksq_load; 524 if (high == NULL || load > high->ksq_load) 525 high = kseq; 526 if (low == NULL || load < low->ksq_load) 527 low = kseq; 528 } 529 if (high != NULL && low != NULL && high != low) 530 sched_balance_pair(high, low); 531} 532 533static void 534sched_balance_pair(struct kseq *high, struct kseq *low) 535{ 536 int transferable; 537 int high_load; 538 int low_load; 539 int move; 540 int diff; 541 int i; 542 543 /* 544 * If we're transfering within a group we have to use this specific 545 * kseq's transferable count, otherwise we can steal from other members 546 * of the group. 547 */ 548 if (high->ksq_group == low->ksq_group) { 549 transferable = high->ksq_transferable; 550 high_load = high->ksq_load; 551 low_load = low->ksq_load; 552 } else { 553 transferable = high->ksq_group->ksg_transferable; 554 high_load = high->ksq_group->ksg_load; 555 low_load = low->ksq_group->ksg_load; 556 } 557 if (transferable == 0) 558 return; 559 /* 560 * Determine what the imbalance is and then adjust that to how many 561 * kses we actually have to give up (transferable). 562 */ 563 diff = high_load - low_load; 564 move = diff / 2; 565 if (diff & 0x1) 566 move++; 567 move = min(move, transferable); 568 for (i = 0; i < move; i++) 569 kseq_move(high, KSEQ_ID(low)); 570 return; 571} 572 573static void 574kseq_move(struct kseq *from, int cpu) 575{ 576 struct kseq *kseq; 577 struct kseq *to; 578 struct kse *ke; 579 580 kseq = from; 581 to = KSEQ_CPU(cpu); 582 ke = kseq_steal(kseq, 1); 583 if (ke == NULL) { 584 struct kseq_group *ksg; 585 586 ksg = kseq->ksq_group; 587 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { 588 if (kseq == from || kseq->ksq_transferable == 0) 589 continue; 590 ke = kseq_steal(kseq, 1); 591 break; 592 } 593 if (ke == NULL) 594 panic("kseq_move: No KSEs available with a " 595 "transferable count of %d\n", 596 ksg->ksg_transferable); 597 } 598 if (kseq == to) 599 return; 600 ke->ke_state = KES_THREAD; 601 kseq_runq_rem(kseq, ke); 602 kseq_load_rem(kseq, ke); 603 kseq_notify(ke, cpu); 604} 605 606static int 607kseq_idled(struct kseq *kseq) 608{ 609 struct kseq_group *ksg; 610 struct kseq *steal; 611 struct kse *ke; 612 613 ksg = kseq->ksq_group; 614 /* 615 * If we're in a cpu group, try and steal kses from another cpu in 616 * the group before idling. 
617 */ 618 if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) { 619 LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) { 620 if (steal == kseq || steal->ksq_transferable == 0) 621 continue; 622 ke = kseq_steal(steal, 0); 623 if (ke == NULL) 624 continue; 625 ke->ke_state = KES_THREAD; 626 kseq_runq_rem(steal, ke); 627 kseq_load_rem(steal, ke); 628 ke->ke_cpu = PCPU_GET(cpuid); 629 ke->ke_flags |= KEF_INTERNAL | KEF_HOLD; 630 sched_add(ke->ke_thread, SRQ_YIELDING); 631 return (0); 632 } 633 } 634 /* 635 * We only set the idled bit when all of the cpus in the group are 636 * idle. Otherwise we could get into a situation where a KSE bounces 637 * back and forth between two idle cores on seperate physical CPUs. 638 */ 639 ksg->ksg_idlemask |= PCPU_GET(cpumask); 640 if (ksg->ksg_idlemask != ksg->ksg_cpumask) 641 return (1); 642 atomic_set_int(&kseq_idle, ksg->ksg_mask); 643 return (1); 644} 645 646static void 647kseq_assign(struct kseq *kseq) 648{ 649 struct kse *nke; 650 struct kse *ke; 651 652 do { 653 *(volatile struct kse **)&ke = kseq->ksq_assigned; 654 } while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned, 655 (uintptr_t)ke, (uintptr_t)NULL)); 656 for (; ke != NULL; ke = nke) { 657 nke = ke->ke_assign; 658 kseq->ksq_group->ksg_load--; 659 kseq->ksq_load--; 660 ke->ke_flags &= ~KEF_ASSIGNED; 661 ke->ke_flags |= KEF_INTERNAL | KEF_HOLD; 662 sched_add(ke->ke_thread, SRQ_YIELDING); 663 } 664} 665 666static void 667kseq_notify(struct kse *ke, int cpu) 668{ 669 struct kseq *kseq; 670 struct thread *td; 671 struct pcpu *pcpu; 672 int class; 673 int prio; 674 675 kseq = KSEQ_CPU(cpu); 676 /* XXX */ 677 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 678 if ((class == PRI_TIMESHARE || class == PRI_REALTIME) && 679 (kseq_idle & kseq->ksq_group->ksg_mask)) 680 atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask); 681 kseq->ksq_group->ksg_load++; 682 kseq->ksq_load++; 683 ke->ke_cpu = cpu; 684 ke->ke_flags |= KEF_ASSIGNED; 685 prio = ke->ke_thread->td_priority; 686 687 /* 688 * Place a KSE on another cpu's queue and force a resched. 689 */ 690 do { 691 *(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned; 692 } while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned, 693 (uintptr_t)ke->ke_assign, (uintptr_t)ke)); 694 /* 695 * Without sched_lock we could lose a race where we set NEEDRESCHED 696 * on a thread that is switched out before the IPI is delivered. This 697 * would lead us to miss the resched. This will be a problem once 698 * sched_lock is pushed down. 
699 */ 700 pcpu = pcpu_find(cpu); 701 td = pcpu->pc_curthread; 702 if (ke->ke_thread->td_priority < td->td_priority || 703 td == pcpu->pc_idlethread) { 704 td->td_flags |= TDF_NEEDRESCHED; 705 ipi_selected(1 << cpu, IPI_AST); 706 } 707} 708 709static struct kse * 710runq_steal(struct runq *rq) 711{ 712 struct rqhead *rqh; 713 struct rqbits *rqb; 714 struct kse *ke; 715 int word; 716 int bit; 717 718 mtx_assert(&sched_lock, MA_OWNED); 719 rqb = &rq->rq_status; 720 for (word = 0; word < RQB_LEN; word++) { 721 if (rqb->rqb_bits[word] == 0) 722 continue; 723 for (bit = 0; bit < RQB_BPW; bit++) { 724 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 725 continue; 726 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 727 TAILQ_FOREACH(ke, rqh, ke_procq) { 728 if (KSE_CAN_MIGRATE(ke)) 729 return (ke); 730 } 731 } 732 } 733 return (NULL); 734} 735 736static struct kse * 737kseq_steal(struct kseq *kseq, int stealidle) 738{ 739 struct kse *ke; 740 741 /* 742 * Steal from next first to try to get a non-interactive task that 743 * may not have run for a while. 744 */ 745 if ((ke = runq_steal(kseq->ksq_next)) != NULL) 746 return (ke); 747 if ((ke = runq_steal(kseq->ksq_curr)) != NULL) 748 return (ke); 749 if (stealidle) 750 return (runq_steal(&kseq->ksq_idle)); 751 return (NULL); 752} 753 754int 755kseq_transfer(struct kseq *kseq, struct kse *ke, int class) 756{ 757 struct kseq_group *nksg; 758 struct kseq_group *ksg; 759 struct kseq *old; 760 int cpu; 761 int idx; 762 763 if (smp_started == 0) 764 return (0); 765 cpu = 0; 766 /* 767 * If our load exceeds a certain threshold we should attempt to 768 * reassign this thread. The first candidate is the cpu that 769 * originally ran the thread. If it is idle, assign it there, 770 * otherwise, pick an idle cpu. 771 * 772 * The threshold at which we start to reassign kses has a large impact 773 * on the overall performance of the system. Tuned too high and 774 * some CPUs may idle. Too low and there will be excess migration 775 * and context switches. 776 */ 777 old = KSEQ_CPU(ke->ke_cpu); 778 nksg = old->ksq_group; 779 ksg = kseq->ksq_group; 780 if (kseq_idle) { 781 if (kseq_idle & nksg->ksg_mask) { 782 cpu = ffs(nksg->ksg_idlemask); 783 if (cpu) { 784 CTR2(KTR_SCHED, 785 "kseq_transfer: %p found old cpu %X " 786 "in idlemask.", ke, cpu); 787 goto migrate; 788 } 789 } 790 /* 791 * Multiple cpus could find this bit simultaneously 792 * but the race shouldn't be terrible. 793 */ 794 cpu = ffs(kseq_idle); 795 if (cpu) { 796 CTR2(KTR_SCHED, "kseq_transfer: %p found %X " 797 "in idlemask.", ke, cpu); 798 goto migrate; 799 } 800 } 801 idx = 0; 802#if 0 803 if (old->ksq_load < kseq->ksq_load) { 804 cpu = ke->ke_cpu + 1; 805 CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X " 806 "load less than ours.", ke, cpu); 807 goto migrate; 808 } 809 /* 810 * No new CPU was found, look for one with less load. 811 */ 812 for (idx = 0; idx <= ksg_maxid; idx++) { 813 nksg = KSEQ_GROUP(idx); 814 if (nksg->ksg_load /*+ (nksg->ksg_cpus * 2)*/ < ksg->ksg_load) { 815 cpu = ffs(nksg->ksg_cpumask); 816 CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less " 817 "than ours.", ke, cpu); 818 goto migrate; 819 } 820 } 821#endif 822 /* 823 * If another cpu in this group has idled, assign a thread over 824 * to them after checking to see if there are idled groups. 
825 */ 826 if (ksg->ksg_idlemask) { 827 cpu = ffs(ksg->ksg_idlemask); 828 if (cpu) { 829 CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in " 830 "group.", ke, cpu); 831 goto migrate; 832 } 833 } 834 return (0); 835migrate: 836 /* 837 * Now that we've found an idle CPU, migrate the thread. 838 */ 839 cpu--; 840 ke->ke_runq = NULL; 841 kseq_notify(ke, cpu); 842 843 return (1); 844} 845 846#endif /* SMP */ 847 848/* 849 * Pick the highest priority task we have and return it. 850 */ 851 852static struct kse * 853kseq_choose(struct kseq *kseq) 854{ 855 struct runq *swap; 856 struct kse *ke; 857 int nice; 858 859 mtx_assert(&sched_lock, MA_OWNED); 860 swap = NULL; 861 862 for (;;) { 863 ke = runq_choose(kseq->ksq_curr); 864 if (ke == NULL) { 865 /* 866 * We already swapped once and didn't get anywhere. 867 */ 868 if (swap) 869 break; 870 swap = kseq->ksq_curr; 871 kseq->ksq_curr = kseq->ksq_next; 872 kseq->ksq_next = swap; 873 continue; 874 } 875 /* 876 * If we encounter a slice of 0 the kse is in a 877 * TIMESHARE kse group and its nice was too far out 878 * of the range that receives slices. 879 */ 880 nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin); 881 if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH && 882 ke->ke_proc->p_nice != 0)) { 883 runq_remove(ke->ke_runq, ke); 884 sched_slice(ke); 885 ke->ke_runq = kseq->ksq_next; 886 runq_add(ke->ke_runq, ke, 0); 887 continue; 888 } 889 return (ke); 890 } 891 892 return (runq_choose(&kseq->ksq_idle)); 893} 894 895static void 896kseq_setup(struct kseq *kseq) 897{ 898 runq_init(&kseq->ksq_timeshare[0]); 899 runq_init(&kseq->ksq_timeshare[1]); 900 runq_init(&kseq->ksq_idle); 901 kseq->ksq_curr = &kseq->ksq_timeshare[0]; 902 kseq->ksq_next = &kseq->ksq_timeshare[1]; 903 kseq->ksq_load = 0; 904 kseq->ksq_load_timeshare = 0; 905} 906 907static void 908sched_setup(void *dummy) 909{ 910#ifdef SMP 911 int i; 912#endif 913 914 slice_min = (hz/100); /* 10ms */ 915 slice_max = (hz/7); /* ~140ms */ 916 917#ifdef SMP 918 balance_groups = 0; 919 /* 920 * Initialize the kseqs. 921 */ 922 for (i = 0; i < MAXCPU; i++) { 923 struct kseq *ksq; 924 925 ksq = &kseq_cpu[i]; 926 ksq->ksq_assigned = NULL; 927 kseq_setup(&kseq_cpu[i]); 928 } 929 if (smp_topology == NULL) { 930 struct kseq_group *ksg; 931 struct kseq *ksq; 932 int cpus; 933 934 for (cpus = 0, i = 0; i < MAXCPU; i++) { 935 if (CPU_ABSENT(i)) 936 continue; 937 ksq = &kseq_cpu[cpus]; 938 ksg = &kseq_groups[cpus]; 939 /* 940 * Setup a kseq group with one member. 941 */ 942 ksq->ksq_transferable = 0; 943 ksq->ksq_group = ksg; 944 ksg->ksg_cpus = 1; 945 ksg->ksg_idlemask = 0; 946 ksg->ksg_cpumask = ksg->ksg_mask = 1 << i; 947 ksg->ksg_load = 0; 948 ksg->ksg_transferable = 0; 949 LIST_INIT(&ksg->ksg_members); 950 LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings); 951 cpus++; 952 } 953 ksg_maxid = cpus - 1; 954 } else { 955 struct kseq_group *ksg; 956 struct cpu_group *cg; 957 int j; 958 959 for (i = 0; i < smp_topology->ct_count; i++) { 960 cg = &smp_topology->ct_group[i]; 961 ksg = &kseq_groups[i]; 962 /* 963 * Initialize the group. 964 */ 965 ksg->ksg_idlemask = 0; 966 ksg->ksg_load = 0; 967 ksg->ksg_transferable = 0; 968 ksg->ksg_cpus = cg->cg_count; 969 ksg->ksg_cpumask = cg->cg_mask; 970 LIST_INIT(&ksg->ksg_members); 971 /* 972 * Find all of the group members and add them. 
973 */ 974 for (j = 0; j < MAXCPU; j++) { 975 if ((cg->cg_mask & (1 << j)) != 0) { 976 if (ksg->ksg_mask == 0) 977 ksg->ksg_mask = 1 << j; 978 kseq_cpu[j].ksq_transferable = 0; 979 kseq_cpu[j].ksq_group = ksg; 980 LIST_INSERT_HEAD(&ksg->ksg_members, 981 &kseq_cpu[j], ksq_siblings); 982 } 983 } 984 if (ksg->ksg_cpus > 1) 985 balance_groups = 1; 986 } 987 ksg_maxid = smp_topology->ct_count - 1; 988 } 989 /* 990 * Stagger the group and global load balancer so they do not 991 * interfere with each other. 992 */ 993 bal_tick = ticks + hz; 994 if (balance_groups) 995 gbal_tick = ticks + (hz / 2); 996#else 997 kseq_setup(KSEQ_SELF()); 998#endif 999 mtx_lock_spin(&sched_lock); 1000 kseq_load_add(KSEQ_SELF(), &kse0); 1001 mtx_unlock_spin(&sched_lock); 1002} 1003 1004/* 1005 * Scale the scheduling priority according to the "interactivity" of this 1006 * process. 1007 */ 1008static void 1009sched_priority(struct ksegrp *kg) 1010{ 1011 int pri; 1012 1013 if (kg->kg_pri_class != PRI_TIMESHARE) 1014 return; 1015 1016 pri = SCHED_PRI_INTERACT(sched_interact_score(kg)); 1017 pri += SCHED_PRI_BASE; 1018 pri += kg->kg_proc->p_nice; 1019 1020 if (pri > PRI_MAX_TIMESHARE) 1021 pri = PRI_MAX_TIMESHARE; 1022 else if (pri < PRI_MIN_TIMESHARE) 1023 pri = PRI_MIN_TIMESHARE; 1024 1025 kg->kg_user_pri = pri; 1026 1027 return; 1028} 1029 1030/* 1031 * Calculate a time slice based on the properties of the kseg and the runq 1032 * that we're on. This is only for PRI_TIMESHARE ksegrps. 1033 */ 1034static void 1035sched_slice(struct kse *ke) 1036{ 1037 struct kseq *kseq; 1038 struct ksegrp *kg; 1039 1040 kg = ke->ke_ksegrp; 1041 kseq = KSEQ_CPU(ke->ke_cpu); 1042 1043 if (ke->ke_thread->td_flags & TDF_BORROWING) { 1044 ke->ke_slice = SCHED_SLICE_MIN; 1045 return; 1046 } 1047 1048 /* 1049 * Rationale: 1050 * KSEs in interactive ksegs get a minimal slice so that we 1051 * quickly notice if it abuses its advantage. 1052 * 1053 * KSEs in non-interactive ksegs are assigned a slice that is 1054 * based on the ksegs nice value relative to the least nice kseg 1055 * on the run queue for this cpu. 1056 * 1057 * If the KSE is less nice than all others it gets the maximum 1058 * slice and other KSEs will adjust their slice relative to 1059 * this when they first expire. 1060 * 1061 * There is 20 point window that starts relative to the least 1062 * nice kse on the run queue. Slice size is determined by 1063 * the kse distance from the last nice ksegrp. 1064 * 1065 * If the kse is outside of the window it will get no slice 1066 * and will be reevaluated each time it is selected on the 1067 * run queue. The exception to this is nice 0 ksegs when 1068 * a nice -20 is running. They are always granted a minimum 1069 * slice. 1070 */ 1071 if (!SCHED_INTERACTIVE(kg)) { 1072 int nice; 1073 1074 nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin); 1075 if (kseq->ksq_load_timeshare == 0 || 1076 kg->kg_proc->p_nice < kseq->ksq_nicemin) 1077 ke->ke_slice = SCHED_SLICE_MAX; 1078 else if (nice <= SCHED_SLICE_NTHRESH) 1079 ke->ke_slice = SCHED_SLICE_NICE(nice); 1080 else if (kg->kg_proc->p_nice == 0) 1081 ke->ke_slice = SCHED_SLICE_MIN; 1082 else 1083 ke->ke_slice = 0; 1084 } else 1085 ke->ke_slice = SCHED_SLICE_INTERACTIVE; 1086 1087 return; 1088} 1089 1090/* 1091 * This routine enforces a maximum limit on the amount of scheduling history 1092 * kept. It is called after either the slptime or runtime is adjusted. 1093 * This routine will not operate correctly when slp or run times have been 1094 * adjusted to more than double their maximum. 
1095 */ 1096static void 1097sched_interact_update(struct ksegrp *kg) 1098{ 1099 int sum; 1100 1101 sum = kg->kg_runtime + kg->kg_slptime; 1102 if (sum < SCHED_SLP_RUN_MAX) 1103 return; 1104 /* 1105 * If we have exceeded by more than 1/5th then the algorithm below 1106 * will not bring us back into range. Dividing by two here forces 1107 * us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1108 */ 1109 if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) { 1110 kg->kg_runtime /= 2; 1111 kg->kg_slptime /= 2; 1112 return; 1113 } 1114 kg->kg_runtime = (kg->kg_runtime / 5) * 4; 1115 kg->kg_slptime = (kg->kg_slptime / 5) * 4; 1116} 1117 1118static void 1119sched_interact_fork(struct ksegrp *kg) 1120{ 1121 int ratio; 1122 int sum; 1123 1124 sum = kg->kg_runtime + kg->kg_slptime; 1125 if (sum > SCHED_SLP_RUN_FORK) { 1126 ratio = sum / SCHED_SLP_RUN_FORK; 1127 kg->kg_runtime /= ratio; 1128 kg->kg_slptime /= ratio; 1129 } 1130} 1131 1132static int 1133sched_interact_score(struct ksegrp *kg) 1134{ 1135 int div; 1136 1137 if (kg->kg_runtime > kg->kg_slptime) { 1138 div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF); 1139 return (SCHED_INTERACT_HALF + 1140 (SCHED_INTERACT_HALF - (kg->kg_slptime / div))); 1141 } if (kg->kg_slptime > kg->kg_runtime) { 1142 div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF); 1143 return (kg->kg_runtime / div); 1144 } 1145 1146 /* 1147 * This can happen if slptime and runtime are 0. 1148 */ 1149 return (0); 1150 1151} 1152 1153/* 1154 * Very early in the boot some setup of scheduler-specific 1155 * parts of proc0 and of soem scheduler resources needs to be done. 1156 * Called from: 1157 * proc0_init() 1158 */ 1159void 1160schedinit(void) 1161{ 1162 /* 1163 * Set up the scheduler specific parts of proc0. 1164 */ 1165 proc0.p_sched = NULL; /* XXX */ 1166 ksegrp0.kg_sched = &kg_sched0; 1167 thread0.td_sched = &kse0; 1168 kse0.ke_thread = &thread0; 1169 kse0.ke_state = KES_THREAD; 1170 kg_sched0.skg_concurrency = 1; 1171 kg_sched0.skg_avail_opennings = 0; /* we are already running */ 1172} 1173 1174/* 1175 * This is only somewhat accurate since given many processes of the same 1176 * priority they will switch when their slices run out, which will be 1177 * at most SCHED_SLICE_MAX. 1178 */ 1179int 1180sched_rr_interval(void) 1181{ 1182 return (SCHED_SLICE_MAX); 1183} 1184 1185static void 1186sched_pctcpu_update(struct kse *ke) 1187{ 1188 /* 1189 * Adjust counters and watermark for pctcpu calc. 1190 */ 1191 if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) { 1192 /* 1193 * Shift the tick count out so that the divide doesn't 1194 * round away our results. 1195 */ 1196 ke->ke_ticks <<= 10; 1197 ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) * 1198 SCHED_CPU_TICKS; 1199 ke->ke_ticks >>= 10; 1200 } else 1201 ke->ke_ticks = 0; 1202 ke->ke_ltick = ticks; 1203 ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS; 1204} 1205 1206void 1207sched_thread_priority(struct thread *td, u_char prio) 1208{ 1209 struct kse *ke; 1210 1211 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)", 1212 td, td->td_proc->p_comm, td->td_priority, prio, curthread, 1213 curthread->td_proc->p_comm); 1214 ke = td->td_kse; 1215 mtx_assert(&sched_lock, MA_OWNED); 1216 if (td->td_priority == prio) 1217 return; 1218 if (TD_ON_RUNQ(td)) { 1219 /* 1220 * If the priority has been elevated due to priority 1221 * propagation, we may have to move ourselves to a new 1222 * queue. We still call adjustrunqueue below in case kse 1223 * needs to fix things up. 
1224 */ 1225 if (prio < td->td_priority && ke->ke_runq != NULL && 1226 (ke->ke_flags & KEF_ASSIGNED) == 0 && 1227 ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) { 1228 runq_remove(ke->ke_runq, ke); 1229 ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr; 1230 runq_add(ke->ke_runq, ke, 0); 1231 } 1232 /* 1233 * Hold this kse on this cpu so that sched_prio() doesn't 1234 * cause excessive migration. We only want migration to 1235 * happen as the result of a wakeup. 1236 */ 1237 ke->ke_flags |= KEF_HOLD; 1238 adjustrunqueue(td, prio); 1239 ke->ke_flags &= ~KEF_HOLD; 1240 } else 1241 td->td_priority = prio; 1242} 1243 1244/* 1245 * Update a thread's priority when it is lent another thread's 1246 * priority. 1247 */ 1248void 1249sched_lend_prio(struct thread *td, u_char prio) 1250{ 1251 1252 td->td_flags |= TDF_BORROWING; 1253 sched_thread_priority(td, prio); 1254} 1255 1256/* 1257 * Restore a thread's priority when priority propagation is 1258 * over. The prio argument is the minimum priority the thread 1259 * needs to have to satisfy other possible priority lending 1260 * requests. If the thread's regular priority is less 1261 * important than prio, the thread will keep a priority boost 1262 * of prio. 1263 */ 1264void 1265sched_unlend_prio(struct thread *td, u_char prio) 1266{ 1267 u_char base_pri; 1268 1269 if (td->td_base_pri >= PRI_MIN_TIMESHARE && 1270 td->td_base_pri <= PRI_MAX_TIMESHARE) 1271 base_pri = td->td_ksegrp->kg_user_pri; 1272 else 1273 base_pri = td->td_base_pri; 1274 if (prio >= base_pri) { 1275 td->td_flags &= ~TDF_BORROWING; 1276 sched_thread_priority(td, base_pri); 1277 } else 1278 sched_lend_prio(td, prio); 1279} 1280 1281void 1282sched_prio(struct thread *td, u_char prio) 1283{ 1284 u_char oldprio; 1285 1286 /* First, update the base priority. */ 1287 td->td_base_pri = prio; 1288 1289 /* 1290 * If the thread is borrowing another thread's priority, don't 1291 * ever lower the priority. 1292 */ 1293 if (td->td_flags & TDF_BORROWING && td->td_priority < prio) 1294 return; 1295 1296 /* Change the real priority. */ 1297 oldprio = td->td_priority; 1298 sched_thread_priority(td, prio); 1299 1300 /* 1301 * If the thread is on a turnstile, then let the turnstile update 1302 * its state. 1303 */ 1304 if (TD_ON_LOCK(td) && oldprio != prio) 1305 turnstile_adjust(td, oldprio); 1306} 1307 1308void 1309sched_switch(struct thread *td, struct thread *newtd, int flags) 1310{ 1311 struct kseq *ksq; 1312 struct kse *ke; 1313 1314 mtx_assert(&sched_lock, MA_OWNED); 1315 1316 ke = td->td_kse; 1317 ksq = KSEQ_SELF(); 1318 1319 td->td_lastcpu = td->td_oncpu; 1320 td->td_oncpu = NOCPU; 1321 td->td_flags &= ~TDF_NEEDRESCHED; 1322 td->td_owepreempt = 0; 1323 1324 /* 1325 * If the KSE has been assigned it may be in the process of switching 1326 * to the new cpu. This is the case in sched_bind(). 1327 */ 1328 if (td == PCPU_GET(idlethread)) { 1329 TD_SET_CAN_RUN(td); 1330 } else if ((ke->ke_flags & KEF_ASSIGNED) == 0) { 1331 /* We are ending our run so make our slot available again */ 1332 SLOT_RELEASE(td->td_ksegrp); 1333 kseq_load_rem(ksq, ke); 1334 if (TD_IS_RUNNING(td)) { 1335 /* 1336 * Don't allow the thread to migrate 1337 * from a preemption. 1338 */ 1339 ke->ke_flags |= KEF_HOLD; 1340 setrunqueue(td, (flags & SW_PREEMPT) ? 1341 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED : 1342 SRQ_OURSELF|SRQ_YIELDING); 1343 ke->ke_flags &= ~KEF_HOLD; 1344 } else if ((td->td_proc->p_flag & P_HADTHREADS) && 1345 (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)) 1346 /* 1347 * We will not be on the run queue. 
1348 * So we must be sleeping or similar. 1349 * Don't use the slot if we will need it 1350 * for newtd. 1351 */ 1352 slot_fill(td->td_ksegrp); 1353 } 1354 if (newtd != NULL) { 1355 /* 1356 * If we bring in a thread account for it as if it had been 1357 * added to the run queue and then chosen. 1358 */ 1359 newtd->td_kse->ke_flags |= KEF_DIDRUN; 1360 newtd->td_kse->ke_runq = ksq->ksq_curr; 1361 TD_SET_RUNNING(newtd); 1362 kseq_load_add(KSEQ_SELF(), newtd->td_kse); 1363 /* 1364 * XXX When we preempt, we've already consumed a slot because 1365 * we got here through sched_add(). However, newtd can come 1366 * from thread_switchout() which can't SLOT_USE() because 1367 * the SLOT code is scheduler dependent. We must use the 1368 * slot here otherwise. 1369 */ 1370 if ((flags & SW_PREEMPT) == 0) 1371 SLOT_USE(newtd->td_ksegrp); 1372 } else 1373 newtd = choosethread(); 1374 if (td != newtd) { 1375#ifdef HWPMC_HOOKS 1376 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1377 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 1378#endif 1379 cpu_switch(td, newtd); 1380#ifdef HWPMC_HOOKS 1381 if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 1382 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN); 1383#endif 1384 } 1385 1386 sched_lock.mtx_lock = (uintptr_t)td; 1387 1388 td->td_oncpu = PCPU_GET(cpuid); 1389} 1390 1391void 1392sched_nice(struct proc *p, int nice) 1393{ 1394 struct ksegrp *kg; 1395 struct kse *ke; 1396 struct thread *td; 1397 struct kseq *kseq; 1398 1399 PROC_LOCK_ASSERT(p, MA_OWNED); 1400 mtx_assert(&sched_lock, MA_OWNED); 1401 /* 1402 * We need to adjust the nice counts for running KSEs. 1403 */ 1404 FOREACH_KSEGRP_IN_PROC(p, kg) { 1405 if (kg->kg_pri_class == PRI_TIMESHARE) { 1406 FOREACH_THREAD_IN_GROUP(kg, td) { 1407 ke = td->td_kse; 1408 if (ke->ke_runq == NULL) 1409 continue; 1410 kseq = KSEQ_CPU(ke->ke_cpu); 1411 kseq_nice_rem(kseq, p->p_nice); 1412 kseq_nice_add(kseq, nice); 1413 } 1414 } 1415 } 1416 p->p_nice = nice; 1417 FOREACH_KSEGRP_IN_PROC(p, kg) { 1418 sched_priority(kg); 1419 FOREACH_THREAD_IN_GROUP(kg, td) 1420 td->td_flags |= TDF_NEEDRESCHED; 1421 } 1422} 1423 1424void 1425sched_sleep(struct thread *td) 1426{ 1427 mtx_assert(&sched_lock, MA_OWNED); 1428 1429 td->td_slptime = ticks; 1430} 1431 1432void 1433sched_wakeup(struct thread *td) 1434{ 1435 mtx_assert(&sched_lock, MA_OWNED); 1436 1437 /* 1438 * Let the kseg know how long we slept for. This is because process 1439 * interactivity behavior is modeled in the kseg. 1440 */ 1441 if (td->td_slptime) { 1442 struct ksegrp *kg; 1443 int hzticks; 1444 1445 kg = td->td_ksegrp; 1446 hzticks = (ticks - td->td_slptime) << 10; 1447 if (hzticks >= SCHED_SLP_RUN_MAX) { 1448 kg->kg_slptime = SCHED_SLP_RUN_MAX; 1449 kg->kg_runtime = 1; 1450 } else { 1451 kg->kg_slptime += hzticks; 1452 sched_interact_update(kg); 1453 } 1454 sched_priority(kg); 1455 sched_slice(td->td_kse); 1456 td->td_slptime = 0; 1457 } 1458 setrunqueue(td, SRQ_BORING); 1459} 1460 1461/* 1462 * Penalize the parent for creating a new child and initialize the child's 1463 * priority. 
1464 */ 1465void 1466sched_fork(struct thread *td, struct thread *childtd) 1467{ 1468 1469 mtx_assert(&sched_lock, MA_OWNED); 1470 1471 sched_fork_ksegrp(td, childtd->td_ksegrp); 1472 sched_fork_thread(td, childtd); 1473} 1474 1475void 1476sched_fork_ksegrp(struct thread *td, struct ksegrp *child) 1477{ 1478 struct ksegrp *kg = td->td_ksegrp; 1479 mtx_assert(&sched_lock, MA_OWNED); 1480 1481 child->kg_slptime = kg->kg_slptime; 1482 child->kg_runtime = kg->kg_runtime; 1483 child->kg_user_pri = kg->kg_user_pri; 1484 sched_interact_fork(child); 1485 kg->kg_runtime += tickincr << 10; 1486 sched_interact_update(kg); 1487} 1488 1489void 1490sched_fork_thread(struct thread *td, struct thread *child) 1491{ 1492 struct kse *ke; 1493 struct kse *ke2; 1494 1495 sched_newthread(child); 1496 ke = td->td_kse; 1497 ke2 = child->td_kse; 1498 ke2->ke_slice = 1; /* Attempt to quickly learn interactivity. */ 1499 ke2->ke_cpu = ke->ke_cpu; 1500 ke2->ke_runq = NULL; 1501 1502 /* Grab our parents cpu estimation information. */ 1503 ke2->ke_ticks = ke->ke_ticks; 1504 ke2->ke_ltick = ke->ke_ltick; 1505 ke2->ke_ftick = ke->ke_ftick; 1506} 1507 1508void 1509sched_class(struct ksegrp *kg, int class) 1510{ 1511 struct kseq *kseq; 1512 struct kse *ke; 1513 struct thread *td; 1514 int nclass; 1515 int oclass; 1516 1517 mtx_assert(&sched_lock, MA_OWNED); 1518 if (kg->kg_pri_class == class) 1519 return; 1520 1521 nclass = PRI_BASE(class); 1522 oclass = PRI_BASE(kg->kg_pri_class); 1523 FOREACH_THREAD_IN_GROUP(kg, td) { 1524 ke = td->td_kse; 1525 if ((ke->ke_state != KES_ONRUNQ && 1526 ke->ke_state != KES_THREAD) || ke->ke_runq == NULL) 1527 continue; 1528 kseq = KSEQ_CPU(ke->ke_cpu); 1529 1530#ifdef SMP 1531 /* 1532 * On SMP if we're on the RUNQ we must adjust the transferable 1533 * count because could be changing to or from an interrupt 1534 * class. 1535 */ 1536 if (ke->ke_state == KES_ONRUNQ) { 1537 if (KSE_CAN_MIGRATE(ke)) { 1538 kseq->ksq_transferable--; 1539 kseq->ksq_group->ksg_transferable--; 1540 } 1541 if (KSE_CAN_MIGRATE(ke)) { 1542 kseq->ksq_transferable++; 1543 kseq->ksq_group->ksg_transferable++; 1544 } 1545 } 1546#endif 1547 if (oclass == PRI_TIMESHARE) { 1548 kseq->ksq_load_timeshare--; 1549 kseq_nice_rem(kseq, kg->kg_proc->p_nice); 1550 } 1551 if (nclass == PRI_TIMESHARE) { 1552 kseq->ksq_load_timeshare++; 1553 kseq_nice_add(kseq, kg->kg_proc->p_nice); 1554 } 1555 } 1556 1557 kg->kg_pri_class = class; 1558} 1559 1560/* 1561 * Return some of the child's priority and interactivity to the parent. 
1562 */ 1563void 1564sched_exit(struct proc *p, struct thread *childtd) 1565{ 1566 mtx_assert(&sched_lock, MA_OWNED); 1567 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd); 1568 sched_exit_thread(NULL, childtd); 1569} 1570 1571void 1572sched_exit_ksegrp(struct ksegrp *kg, struct thread *td) 1573{ 1574 /* kg->kg_slptime += td->td_ksegrp->kg_slptime; */ 1575 kg->kg_runtime += td->td_ksegrp->kg_runtime; 1576 sched_interact_update(kg); 1577} 1578 1579void 1580sched_exit_thread(struct thread *td, struct thread *childtd) 1581{ 1582 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d", 1583 childtd, childtd->td_proc->p_comm, childtd->td_priority); 1584 kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse); 1585} 1586 1587void 1588sched_clock(struct thread *td) 1589{ 1590 struct kseq *kseq; 1591 struct ksegrp *kg; 1592 struct kse *ke; 1593 1594 mtx_assert(&sched_lock, MA_OWNED); 1595 kseq = KSEQ_SELF(); 1596#ifdef SMP 1597 if (ticks >= bal_tick) 1598 sched_balance(); 1599 if (ticks >= gbal_tick && balance_groups) 1600 sched_balance_groups(); 1601 /* 1602 * We could have been assigned a non real-time thread without an 1603 * IPI. 1604 */ 1605 if (kseq->ksq_assigned) 1606 kseq_assign(kseq); /* Potentially sets NEEDRESCHED */ 1607#endif 1608 /* 1609 * sched_setup() apparently happens prior to stathz being set. We 1610 * need to resolve the timers earlier in the boot so we can avoid 1611 * calculating this here. 1612 */ 1613 if (realstathz == 0) { 1614 realstathz = stathz ? stathz : hz; 1615 tickincr = hz / realstathz; 1616 /* 1617 * XXX This does not work for values of stathz that are much 1618 * larger than hz. 1619 */ 1620 if (tickincr == 0) 1621 tickincr = 1; 1622 } 1623 1624 ke = td->td_kse; 1625 kg = ke->ke_ksegrp; 1626 1627 /* Adjust ticks for pctcpu */ 1628 ke->ke_ticks++; 1629 ke->ke_ltick = ticks; 1630 1631 /* Go up to one second beyond our max and then trim back down */ 1632 if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick) 1633 sched_pctcpu_update(ke); 1634 1635 if (td->td_flags & TDF_IDLETD) 1636 return; 1637 /* 1638 * We only do slicing code for TIMESHARE ksegrps. 1639 */ 1640 if (kg->kg_pri_class != PRI_TIMESHARE) 1641 return; 1642 /* 1643 * We used a tick charge it to the ksegrp so that we can compute our 1644 * interactivity. 1645 */ 1646 kg->kg_runtime += tickincr << 10; 1647 sched_interact_update(kg); 1648 1649 /* 1650 * We used up one time slice. 1651 */ 1652 if (--ke->ke_slice > 0) 1653 return; 1654 /* 1655 * We're out of time, recompute priorities and requeue. 
1656 */ 1657 kseq_load_rem(kseq, ke); 1658 sched_priority(kg); 1659 sched_slice(ke); 1660 if (SCHED_CURR(kg, ke)) 1661 ke->ke_runq = kseq->ksq_curr; 1662 else 1663 ke->ke_runq = kseq->ksq_next; 1664 kseq_load_add(kseq, ke); 1665 td->td_flags |= TDF_NEEDRESCHED; 1666} 1667 1668int 1669sched_runnable(void) 1670{ 1671 struct kseq *kseq; 1672 int load; 1673 1674 load = 1; 1675 1676 kseq = KSEQ_SELF(); 1677#ifdef SMP 1678 if (kseq->ksq_assigned) { 1679 mtx_lock_spin(&sched_lock); 1680 kseq_assign(kseq); 1681 mtx_unlock_spin(&sched_lock); 1682 } 1683#endif 1684 if ((curthread->td_flags & TDF_IDLETD) != 0) { 1685 if (kseq->ksq_load > 0) 1686 goto out; 1687 } else 1688 if (kseq->ksq_load - 1 > 0) 1689 goto out; 1690 load = 0; 1691out: 1692 return (load); 1693} 1694 1695void 1696sched_userret(struct thread *td) 1697{ 1698 struct ksegrp *kg; 1699 1700 KASSERT((td->td_flags & TDF_BORROWING) == 0, 1701 ("thread with borrowed priority returning to userland")); 1702 kg = td->td_ksegrp; 1703 if (td->td_priority != kg->kg_user_pri) { 1704 mtx_lock_spin(&sched_lock); 1705 td->td_priority = kg->kg_user_pri; 1706 td->td_base_pri = kg->kg_user_pri; 1707 mtx_unlock_spin(&sched_lock); 1708 } 1709} 1710 1711struct kse * 1712sched_choose(void) 1713{ 1714 struct kseq *kseq; 1715 struct kse *ke; 1716 1717 mtx_assert(&sched_lock, MA_OWNED); 1718 kseq = KSEQ_SELF(); 1719#ifdef SMP 1720restart: 1721 if (kseq->ksq_assigned) 1722 kseq_assign(kseq); 1723#endif 1724 ke = kseq_choose(kseq); 1725 if (ke) { 1726#ifdef SMP 1727 if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) 1728 if (kseq_idled(kseq) == 0) 1729 goto restart; 1730#endif 1731 kseq_runq_rem(kseq, ke); 1732 ke->ke_state = KES_THREAD; 1733 return (ke); 1734 } 1735#ifdef SMP 1736 if (kseq_idled(kseq) == 0) 1737 goto restart; 1738#endif 1739 return (NULL); 1740} 1741 1742void 1743sched_add(struct thread *td, int flags) 1744{ 1745 struct kseq *kseq; 1746 struct ksegrp *kg; 1747 struct kse *ke; 1748 int preemptive; 1749 int canmigrate; 1750 int class; 1751 1752 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)", 1753 td, td->td_proc->p_comm, td->td_priority, curthread, 1754 curthread->td_proc->p_comm); 1755 mtx_assert(&sched_lock, MA_OWNED); 1756 ke = td->td_kse; 1757 kg = td->td_ksegrp; 1758 canmigrate = 1; 1759 preemptive = !(flags & SRQ_YIELDING); 1760 class = PRI_BASE(kg->kg_pri_class); 1761 kseq = KSEQ_SELF(); 1762 if ((ke->ke_flags & KEF_INTERNAL) == 0) 1763 SLOT_USE(td->td_ksegrp); 1764 ke->ke_flags &= ~KEF_INTERNAL; 1765#ifdef SMP 1766 if (ke->ke_flags & KEF_ASSIGNED) { 1767 if (ke->ke_flags & KEF_REMOVED) 1768 ke->ke_flags &= ~KEF_REMOVED; 1769 return; 1770 } 1771 canmigrate = KSE_CAN_MIGRATE(ke); 1772#endif 1773 KASSERT(ke->ke_state != KES_ONRUNQ, 1774 ("sched_add: kse %p (%s) already in run queue", ke, 1775 ke->ke_proc->p_comm)); 1776 KASSERT(ke->ke_proc->p_sflag & PS_INMEM, 1777 ("sched_add: process swapped out")); 1778 KASSERT(ke->ke_runq == NULL, 1779 ("sched_add: KSE %p is still assigned to a run queue", ke)); 1780 switch (class) { 1781 case PRI_ITHD: 1782 case PRI_REALTIME: 1783 ke->ke_runq = kseq->ksq_curr; 1784 ke->ke_slice = SCHED_SLICE_MAX; 1785 if (canmigrate) 1786 ke->ke_cpu = PCPU_GET(cpuid); 1787 break; 1788 case PRI_TIMESHARE: 1789 if (SCHED_CURR(kg, ke)) 1790 ke->ke_runq = kseq->ksq_curr; 1791 else 1792 ke->ke_runq = kseq->ksq_next; 1793 break; 1794 case PRI_IDLE: 1795 /* 1796 * This is for priority prop. 
1797 */ 1798 if (ke->ke_thread->td_priority < PRI_MIN_IDLE) 1799 ke->ke_runq = kseq->ksq_curr; 1800 else 1801 ke->ke_runq = &kseq->ksq_idle; 1802 ke->ke_slice = SCHED_SLICE_MIN; 1803 break; 1804 default: 1805 panic("Unknown pri class."); 1806 break; 1807 } 1808#ifdef SMP 1809 /* 1810 * Don't migrate running threads here. Force the long term balancer 1811 * to do it. 1812 */ 1813 if (ke->ke_flags & KEF_HOLD) { 1814 ke->ke_flags &= ~KEF_HOLD; 1815 canmigrate = 0; 1816 } 1817 /* 1818 * If this thread is pinned or bound, notify the target cpu. 1819 */ 1820 if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid) ) { 1821 ke->ke_runq = NULL; 1822 kseq_notify(ke, ke->ke_cpu); 1823 return; 1824 } 1825 /* 1826 * If we had been idle, clear our bit in the group and potentially 1827 * the global bitmap. If not, see if we should transfer this thread. 1828 */ 1829 if ((class == PRI_TIMESHARE || class == PRI_REALTIME) && 1830 (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) { 1831 /* 1832 * Check to see if our group is unidling, and if so, remove it 1833 * from the global idle mask. 1834 */ 1835 if (kseq->ksq_group->ksg_idlemask == 1836 kseq->ksq_group->ksg_cpumask) 1837 atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask); 1838 /* 1839 * Now remove ourselves from the group specific idle mask. 1840 */ 1841 kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask); 1842 } else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD) 1843 if (kseq_transfer(kseq, ke, class)) 1844 return; 1845 ke->ke_cpu = PCPU_GET(cpuid); 1846#endif 1847 if (td->td_priority < curthread->td_priority && 1848 ke->ke_runq == kseq->ksq_curr) 1849 curthread->td_flags |= TDF_NEEDRESCHED; 1850 if (preemptive && maybe_preempt(td)) 1851 return; 1852 ke->ke_state = KES_ONRUNQ; 1853 1854 kseq_runq_add(kseq, ke, flags); 1855 kseq_load_add(kseq, ke); 1856} 1857 1858void 1859sched_rem(struct thread *td) 1860{ 1861 struct kseq *kseq; 1862 struct kse *ke; 1863 1864 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)", 1865 td, td->td_proc->p_comm, td->td_priority, curthread, 1866 curthread->td_proc->p_comm); 1867 mtx_assert(&sched_lock, MA_OWNED); 1868 ke = td->td_kse; 1869 SLOT_RELEASE(td->td_ksegrp); 1870 if (ke->ke_flags & KEF_ASSIGNED) { 1871 ke->ke_flags |= KEF_REMOVED; 1872 return; 1873 } 1874 KASSERT((ke->ke_state == KES_ONRUNQ), 1875 ("sched_rem: KSE not on run queue")); 1876 1877 ke->ke_state = KES_THREAD; 1878 kseq = KSEQ_CPU(ke->ke_cpu); 1879 kseq_runq_rem(kseq, ke); 1880 kseq_load_rem(kseq, ke); 1881} 1882 1883fixpt_t 1884sched_pctcpu(struct thread *td) 1885{ 1886 fixpt_t pctcpu; 1887 struct kse *ke; 1888 1889 pctcpu = 0; 1890 ke = td->td_kse; 1891 if (ke == NULL) 1892 return (0); 1893 1894 mtx_lock_spin(&sched_lock); 1895 if (ke->ke_ticks) { 1896 int rtick; 1897 1898 /* 1899 * Don't update more frequently than twice a second. Allowing 1900 * this causes the cpu usage to decay away too quickly due to 1901 * rounding errors. 1902 */ 1903 if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick || 1904 ke->ke_ltick < (ticks - (hz / 2))) 1905 sched_pctcpu_update(ke); 1906 /* How many rtick per second ? 
*/ 1907 rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS); 1908 pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT; 1909 } 1910 1911 ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick; 1912 mtx_unlock_spin(&sched_lock); 1913 1914 return (pctcpu); 1915} 1916 1917void 1918sched_bind(struct thread *td, int cpu) 1919{ 1920 struct kse *ke; 1921 1922 mtx_assert(&sched_lock, MA_OWNED); 1923 ke = td->td_kse; 1924 ke->ke_flags |= KEF_BOUND; 1925#ifdef SMP 1926 if (PCPU_GET(cpuid) == cpu) 1927 return; 1928 /* sched_rem without the runq_remove */ 1929 ke->ke_state = KES_THREAD; 1930 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1931 kseq_notify(ke, cpu); 1932 /* When we return from mi_switch we'll be on the correct cpu. */ 1933 mi_switch(SW_VOL, NULL); 1934#endif 1935} 1936 1937void 1938sched_unbind(struct thread *td) 1939{ 1940 mtx_assert(&sched_lock, MA_OWNED); 1941 td->td_kse->ke_flags &= ~KEF_BOUND; 1942} 1943 1944int 1945sched_is_bound(struct thread *td) 1946{ 1947 mtx_assert(&sched_lock, MA_OWNED); 1948 return (td->td_kse->ke_flags & KEF_BOUND); 1949} 1950 1951int 1952sched_load(void) 1953{ 1954#ifdef SMP 1955 int total; 1956 int i; 1957 1958 total = 0; 1959 for (i = 0; i <= ksg_maxid; i++) 1960 total += KSEQ_GROUP(i)->ksg_load; 1961 return (total); 1962#else 1963 return (KSEQ_SELF()->ksq_sysload); 1964#endif 1965} 1966 1967int 1968sched_sizeof_ksegrp(void) 1969{ 1970 return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); 1971} 1972 1973int 1974sched_sizeof_proc(void) 1975{ 1976 return (sizeof(struct proc)); 1977} 1978 1979int 1980sched_sizeof_thread(void) 1981{ 1982 return (sizeof(struct thread) + sizeof(struct td_sched)); 1983} 1984#define KERN_SWITCH_INCLUDE 1 1985#include "kern/kern_switch.c" 1986
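
/*
 * The #if 0 blocks below are illustrative user-space sketches of the core
 * ULE calculations above; they are not part of the original sched_ule.c.
 * Names prefixed with example_ and all sample numbers are invented for the
 * examples, and the relevant constants are restated locally so each sketch
 * compiles on its own once the guard is removed.
 *
 * First, the interactivity score from sched_interact_score(): it falls in
 * [0, SCHED_INTERACT_MAX) and lands below SCHED_INTERACT_THRESH for threads
 * that sleep more than they run.  The kernel keeps kg_runtime and kg_slptime
 * shifted left by 10, but only their ratio matters for the result.
 */
#if 0
#include <stdio.h>

#define	EX_INTERACT_MAX		100
#define	EX_INTERACT_HALF	(EX_INTERACT_MAX / 2)
#define	EX_INTERACT_THRESH	30

static int
example_interact_score(int runtime, int slptime)
{
	int div;

	if (runtime > slptime) {
		/* Mostly running: the score lands in the upper half. */
		div = runtime / EX_INTERACT_HALF;
		if (div < 1)
			div = 1;
		return (EX_INTERACT_HALF + (EX_INTERACT_HALF - slptime / div));
	}
	if (slptime > runtime) {
		/* Mostly sleeping: the score lands in the lower half. */
		div = slptime / EX_INTERACT_HALF;
		if (div < 1)
			div = 1;
		return (runtime / div);
	}
	return (0);			/* Equal, or both zero. */
}

int
main(void)
{
	/* Ran for 1000 ticks, slept for 3000: the score is about 16. */
	int score = example_interact_score(1000, 3000);

	printf("score %d, interactive %d\n", score,
	    score < EX_INTERACT_THRESH);
	return (0);
}
#endif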
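
/*
 * A sketch of the history cap enforced by sched_interact_update(): once
 * kg_runtime + kg_slptime drifts past SCHED_SLP_RUN_MAX, both values are
 * scaled down by the same factor, so the interactivity score is preserved
 * while the amount of history stays bounded.  The hz value of 1000 and the
 * sample tick counts are assumptions for the example.
 */
#if 0
#include <stdio.h>

#define	EX_SLP_RUN_MAX	((1000 * 5) << 10)	/* (hz * 5) << 10, hz = 1000 */

static void
example_interact_update(int *runtime, int *slptime)
{
	int sum = *runtime + *slptime;

	if (sum < EX_SLP_RUN_MAX)
		return;
	if (sum > (EX_SLP_RUN_MAX / 5) * 6) {
		/* More than 1/5th over the cap: only halving gets us back. */
		*runtime /= 2;
		*slptime /= 2;
		return;
	}
	/* Slightly over: trimming 1/5th from each value is enough. */
	*runtime = (*runtime / 5) * 4;
	*slptime = (*slptime / 5) * 4;
}

int
main(void)
{
	int runtime = 2000000, slptime = 3500000;

	example_interact_update(&runtime, &slptime);
	/* Prints 1600000 2800000; the runtime:slptime ratio is unchanged. */
	printf("%d %d\n", runtime, slptime);
	return (0);
}
#endif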
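
/*
 * A sketch of how sched_priority() turns the interactivity score and the
 * nice value into kg_user_pri.  The 160..223 timeshare range assumed for
 * PRI_MIN_TIMESHARE/PRI_MAX_TIMESHARE comes from <sys/priority.h> and is
 * only an assumption here; the shape of the mapping is what matters.
 */
#if 0
#include <stdio.h>

#define	EX_PRI_MIN_TIMESHARE	160		/* assumed */
#define	EX_PRI_MAX_TIMESHARE	223		/* assumed */
#define	EX_PRI_RANGE	(EX_PRI_MAX_TIMESHARE - EX_PRI_MIN_TIMESHARE + 1)
#define	EX_INTERACT_MAX		100

static int
example_user_pri(int score, int nice)
{
	int pri;

	pri = (score * EX_PRI_RANGE) / EX_INTERACT_MAX; /* SCHED_PRI_INTERACT */
	pri += EX_PRI_MIN_TIMESHARE;			/* SCHED_PRI_BASE */
	pri += nice;

	if (pri > EX_PRI_MAX_TIMESHARE)
		pri = EX_PRI_MAX_TIMESHARE;
	else if (pri < EX_PRI_MIN_TIMESHARE)
		pri = EX_PRI_MIN_TIMESHARE;
	return (pri);
}

int
main(void)
{
	/* Interactive task (score 15, nice 0) vs. a CPU hog at nice 5. */
	printf("%d %d\n", example_user_pri(15, 0), example_user_pri(80, 5));
	return (0);
}
#endif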
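
/*
 * A sketch of the slice sizing done by sched_slice()/SCHED_SLICE_NICE() for
 * non-interactive timeshare threads: the slice shrinks linearly with the
 * thread's nice distance from ksq_nicemin.  hz = 1000 is assumed, so
 * slice_min/slice_max take the 10 tick / ~142 tick values computed in
 * sched_setup(), and the threshold of 19 is SCHED_PRI_NHALF - 1 with the
 * usual PRIO_MIN/PRIO_MAX of -20/20.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int hz = 1000;			/* assumed */
	int slice_min = hz / 100;	/* 10 ticks, ~10ms */
	int slice_max = hz / 7;		/* 142 ticks, ~142ms */
	int nthresh = 20 - 1;		/* SCHED_PRI_NHALF - 1 */
	int range = slice_max - slice_min + 1;
	int dist;

	for (dist = 0; dist <= nthresh; dist++) {
		/* SCHED_SLICE_NICE(dist) */
		int slice = slice_max - (dist * range) / nthresh;

		printf("nice distance %2d -> %3d ticks\n", dist, slice);
	}
	/*
	 * Distance 0 (the least nice thread on the queue) gets the full
	 * ~142 ticks.  Threads past the threshold get a zero slice in
	 * sched_slice(), except that a nice 0 thread always receives at
	 * least slice_min.
	 */
	return (0);
}
#endif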
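
/*
 * A sketch of the move count computed by sched_balance_pair(): half the
 * load imbalance, rounded up, and never more than the number of
 * transferable threads available on the busier queue or group.  The sample
 * loads are made up for the example.
 */
#if 0
#include <stdio.h>

static int
example_move_count(int high_load, int low_load, int transferable)
{
	int diff = high_load - low_load;
	int move = diff / 2;

	if (diff & 0x1)			/* Round the imbalance up. */
		move++;
	if (move > transferable)	/* Cap at what can actually migrate. */
		move = transferable;
	return (move);
}

int
main(void)
{
	/* Loads of 7 and 2, but only 2 transferable threads: move 2. */
	printf("%d\n", example_move_count(7, 2, 2));
	return (0);
}
#endif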