sched_ule.c revision 123487
1210753Srpaulo/*- 2210753Srpaulo * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org> 3210753Srpaulo * All rights reserved. 4210753Srpaulo * 5210753Srpaulo * Redistribution and use in source and binary forms, with or without 6210753Srpaulo * modification, are permitted provided that the following conditions 7210753Srpaulo * are met: 8210753Srpaulo * 1. Redistributions of source code must retain the above copyright 9210753Srpaulo * notice unmodified, this list of conditions, and the following 10210753Srpaulo * disclaimer. 11210753Srpaulo * 2. Redistributions in binary form must reproduce the above copyright 12210753Srpaulo * notice, this list of conditions and the following disclaimer in the 13210753Srpaulo * documentation and/or other materials provided with the distribution. 14210753Srpaulo * 15210753Srpaulo * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16210753Srpaulo * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17210753Srpaulo * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18210753Srpaulo * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19210753Srpaulo * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20210753Srpaulo * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21210753Srpaulo * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22210753Srpaulo * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23210753Srpaulo * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24210753Srpaulo * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25210753Srpaulo */ 26210753Srpaulo 27210753Srpaulo#include <sys/cdefs.h> 28210753Srpaulo__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 123487 2003-12-12 07:33:51Z jeff $"); 29210753Srpaulo 30210753Srpaulo#include <sys/param.h> 31210753Srpaulo#include <sys/systm.h> 32210753Srpaulo#include <sys/kernel.h> 33210753Srpaulo#include <sys/ktr.h> 34210753Srpaulo#include <sys/lock.h> 35210753Srpaulo#include <sys/mutex.h> 36210753Srpaulo#include <sys/proc.h> 37210753Srpaulo#include <sys/resource.h> 38210753Srpaulo#include <sys/resourcevar.h> 39210753Srpaulo#include <sys/sched.h> 40210753Srpaulo#include <sys/smp.h> 41210753Srpaulo#include <sys/sx.h> 42210753Srpaulo#include <sys/sysctl.h> 43210753Srpaulo#include <sys/sysproto.h> 44210753Srpaulo#include <sys/vmmeter.h> 45210753Srpaulo#ifdef DDB 46210753Srpaulo#include <ddb/ddb.h> 47210753Srpaulo#endif 48210753Srpaulo#ifdef KTRACE 49210753Srpaulo#include <sys/uio.h> 50210753Srpaulo#include <sys/ktrace.h> 51210753Srpaulo#endif 52210753Srpaulo 53210753Srpaulo#include <machine/cpu.h> 54210753Srpaulo#include <machine/smp.h> 55210753Srpaulo 56210753Srpaulo#define KTR_ULE KTR_NFS 57210753Srpaulo 58210753Srpaulo/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */ 59210753Srpaulo/* XXX This is bogus compatability crap for ps */ 60210753Srpaulostatic fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */ 61210753SrpauloSYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, ""); 62210753Srpaulo 63210753Srpaulostatic void sched_setup(void *dummy); 64210753SrpauloSYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL) 65210753Srpaulo 66210753Srpaulostatic SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED"); 67210753Srpaulo 68210753Srpaulostatic int sched_strict; 69210753SrpauloSYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, ""); 70210753Srpaulo 71210753Srpaulostatic int slice_min = 1; 72210753SrpauloSYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, ""); 73210753Srpaulo 74210753Srpaulostatic int slice_max = 10; 75210753SrpauloSYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, ""); 76210753Srpaulo 77210753Srpauloint realstathz; 78210753Srpauloint tickincr = 1; 79210753Srpaulo 80210753Srpaulo#ifdef SMP 81210753Srpaulo/* Callouts to handle load balancing SMP systems. */ 82210753Srpaulostatic struct callout kseq_lb_callout; 83210753Srpaulostatic struct callout kseq_group_callout; 84210753Srpaulo#endif 85210753Srpaulo 86210753Srpaulo/* 87210753Srpaulo * These datastructures are allocated within their parent datastructure but 88210753Srpaulo * are scheduler specific. 89210753Srpaulo */ 90210753Srpaulo 91210753Srpaulostruct ke_sched { 92210753Srpaulo int ske_slice; 93210753Srpaulo struct runq *ske_runq; 94210753Srpaulo /* The following variables are only used for pctcpu calculation */ 95210753Srpaulo int ske_ltick; /* Last tick that we were running on */ 96210753Srpaulo int ske_ftick; /* First tick that we were running on */ 97210753Srpaulo int ske_ticks; /* Tick count */ 98210753Srpaulo /* CPU that we have affinity for. */ 99210753Srpaulo u_char ske_cpu; 100210753Srpaulo}; 101210753Srpaulo#define ke_slice ke_sched->ske_slice 102210753Srpaulo#define ke_runq ke_sched->ske_runq 103210753Srpaulo#define ke_ltick ke_sched->ske_ltick 104210753Srpaulo#define ke_ftick ke_sched->ske_ftick 105210753Srpaulo#define ke_ticks ke_sched->ske_ticks 106210753Srpaulo#define ke_cpu ke_sched->ske_cpu 107210753Srpaulo#define ke_assign ke_procq.tqe_next 108210753Srpaulo 109210753Srpaulo#define KEF_ASSIGNED KEF_SCHED0 /* KSE is being migrated. */ 110210753Srpaulo#define KEF_BOUND KEF_SCHED1 /* KSE can not migrate. */ 111210753Srpaulo 112210753Srpaulostruct kg_sched { 113210753Srpaulo int skg_slptime; /* Number of ticks we vol. slept */ 114210753Srpaulo int skg_runtime; /* Number of ticks we were running */ 115210753Srpaulo}; 116210753Srpaulo#define kg_slptime kg_sched->skg_slptime 117210753Srpaulo#define kg_runtime kg_sched->skg_runtime 118210753Srpaulo 119210753Srpaulostruct td_sched { 120210753Srpaulo int std_slptime; 121210753Srpaulo}; 122210753Srpaulo#define td_slptime td_sched->std_slptime 123211545Srpaulo 124210753Srpaulostruct td_sched td_sched; 125210753Srpaulostruct ke_sched ke_sched; 126struct kg_sched kg_sched; 127 128struct ke_sched *kse0_sched = &ke_sched; 129struct kg_sched *ksegrp0_sched = &kg_sched; 130struct p_sched *proc0_sched = NULL; 131struct td_sched *thread0_sched = &td_sched; 132 133/* 134 * The priority is primarily determined by the interactivity score. Thus, we 135 * give lower(better) priorities to kse groups that use less CPU. The nice 136 * value is then directly added to this to allow nice to have some effect 137 * on latency. 138 * 139 * PRI_RANGE: Total priority range for timeshare threads. 140 * PRI_NRESV: Number of nice values. 141 * PRI_BASE: The start of the dynamic range. 142 */ 143#define SCHED_PRI_RANGE (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1) 144#define SCHED_PRI_NRESV ((PRIO_MAX - PRIO_MIN) + 1) 145#define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2) 146#define SCHED_PRI_BASE (PRI_MIN_TIMESHARE) 147#define SCHED_PRI_INTERACT(score) \ 148 ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX) 149 150/* 151 * These determine the interactivity of a process. 152 * 153 * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate 154 * before throttling back. 155 * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time. 156 * INTERACT_MAX: Maximum interactivity value. Smaller is better. 157 * INTERACT_THRESH: Threshhold for placement on the current runq. 158 */ 159#define SCHED_SLP_RUN_MAX ((hz * 5) << 10) 160#define SCHED_SLP_RUN_FORK ((hz / 2) << 10) 161#define SCHED_INTERACT_MAX (100) 162#define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2) 163#define SCHED_INTERACT_THRESH (30) 164 165/* 166 * These parameters and macros determine the size of the time slice that is 167 * granted to each thread. 168 * 169 * SLICE_MIN: Minimum time slice granted, in units of ticks. 170 * SLICE_MAX: Maximum time slice granted. 171 * SLICE_RANGE: Range of available time slices scaled by hz. 172 * SLICE_SCALE: The number slices granted per val in the range of [0, max]. 173 * SLICE_NICE: Determine the amount of slice granted to a scaled nice. 174 * SLICE_NTHRESH: The nice cutoff point for slice assignment. 175 */ 176#define SCHED_SLICE_MIN (slice_min) 177#define SCHED_SLICE_MAX (slice_max) 178#define SCHED_SLICE_NTHRESH (SCHED_PRI_NHALF - 1) 179#define SCHED_SLICE_RANGE (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1) 180#define SCHED_SLICE_SCALE(val, max) (((val) * SCHED_SLICE_RANGE) / (max)) 181#define SCHED_SLICE_NICE(nice) \ 182 (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH)) 183 184/* 185 * This macro determines whether or not the kse belongs on the current or 186 * next run queue. 187 */ 188#define SCHED_INTERACTIVE(kg) \ 189 (sched_interact_score(kg) < SCHED_INTERACT_THRESH) 190#define SCHED_CURR(kg, ke) \ 191 (ke->ke_thread->td_priority != kg->kg_user_pri || \ 192 SCHED_INTERACTIVE(kg)) 193 194/* 195 * Cpu percentage computation macros and defines. 196 * 197 * SCHED_CPU_TIME: Number of seconds to average the cpu usage across. 198 * SCHED_CPU_TICKS: Number of hz ticks to average the cpu usage across. 199 */ 200 201#define SCHED_CPU_TIME 10 202#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME) 203 204/* 205 * kseq - per processor runqs and statistics. 206 */ 207struct kseq { 208 struct runq ksq_idle; /* Queue of IDLE threads. */ 209 struct runq ksq_timeshare[2]; /* Run queues for !IDLE. */ 210 struct runq *ksq_next; /* Next timeshare queue. */ 211 struct runq *ksq_curr; /* Current queue. */ 212 int ksq_load_timeshare; /* Load for timeshare. */ 213 int ksq_load; /* Aggregate load. */ 214 short ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */ 215 short ksq_nicemin; /* Least nice. */ 216#ifdef SMP 217 int ksq_transferable; 218 LIST_ENTRY(kseq) ksq_siblings; /* Next in kseq group. */ 219 struct kseq_group *ksq_group; /* Our processor group. */ 220 volatile struct kse *ksq_assigned; /* assigned by another CPU. */ 221#endif 222}; 223 224#ifdef SMP 225/* 226 * kseq groups are groups of processors which can cheaply share threads. When 227 * one processor in the group goes idle it will check the runqs of the other 228 * processors in its group prior to halting and waiting for an interrupt. 229 * These groups are suitable for SMT (Symetric Multi-Threading) and not NUMA. 230 * In a numa environment we'd want an idle bitmap per group and a two tiered 231 * load balancer. 232 */ 233struct kseq_group { 234 int ksg_cpus; /* Count of CPUs in this kseq group. */ 235 int ksg_cpumask; /* Mask of cpus in this group. */ 236 int ksg_idlemask; /* Idle cpus in this group. */ 237 int ksg_mask; /* Bit mask for first cpu. */ 238 int ksg_load; /* Total load of this group. */ 239 int ksg_transferable; /* Transferable load of this group. */ 240 LIST_HEAD(, kseq) ksg_members; /* Linked list of all members. */ 241}; 242#endif 243 244/* 245 * One kse queue per processor. 246 */ 247#ifdef SMP 248static int kseq_idle; 249static int ksg_maxid; 250static struct kseq kseq_cpu[MAXCPU]; 251static struct kseq_group kseq_groups[MAXCPU]; 252#define KSEQ_SELF() (&kseq_cpu[PCPU_GET(cpuid)]) 253#define KSEQ_CPU(x) (&kseq_cpu[(x)]) 254#define KSEQ_ID(x) ((x) - kseq_cpu) 255#define KSEQ_GROUP(x) (&kseq_groups[(x)]) 256#else /* !SMP */ 257static struct kseq kseq_cpu; 258#define KSEQ_SELF() (&kseq_cpu) 259#define KSEQ_CPU(x) (&kseq_cpu) 260#endif 261 262static void sched_slice(struct kse *ke); 263static void sched_priority(struct ksegrp *kg); 264static int sched_interact_score(struct ksegrp *kg); 265static void sched_interact_update(struct ksegrp *kg); 266static void sched_interact_fork(struct ksegrp *kg); 267static void sched_pctcpu_update(struct kse *ke); 268 269/* Operations on per processor queues */ 270static struct kse * kseq_choose(struct kseq *kseq); 271static void kseq_setup(struct kseq *kseq); 272static void kseq_load_add(struct kseq *kseq, struct kse *ke); 273static void kseq_load_rem(struct kseq *kseq, struct kse *ke); 274static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke); 275static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke); 276static void kseq_nice_add(struct kseq *kseq, int nice); 277static void kseq_nice_rem(struct kseq *kseq, int nice); 278void kseq_print(int cpu); 279#ifdef SMP 280static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class); 281static struct kse *runq_steal(struct runq *rq); 282static void sched_balance(void *arg); 283static void sched_balance_group(struct kseq_group *ksg); 284static void sched_balance_pair(struct kseq *high, struct kseq *low); 285static void kseq_move(struct kseq *from, int cpu); 286static int kseq_idled(struct kseq *kseq); 287static void kseq_notify(struct kse *ke, int cpu); 288static void kseq_assign(struct kseq *); 289static struct kse *kseq_steal(struct kseq *kseq, int stealidle); 290#define KSE_CAN_MIGRATE(ke, class) \ 291 ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 && \ 292 ((ke)->ke_flags & KEF_BOUND) == 0) 293#endif 294 295void 296kseq_print(int cpu) 297{ 298 struct kseq *kseq; 299 int i; 300 301 kseq = KSEQ_CPU(cpu); 302 303 printf("kseq:\n"); 304 printf("\tload: %d\n", kseq->ksq_load); 305 printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare); 306#ifdef SMP 307 printf("\tload transferable: %d\n", kseq->ksq_transferable); 308#endif 309 printf("\tnicemin:\t%d\n", kseq->ksq_nicemin); 310 printf("\tnice counts:\n"); 311 for (i = 0; i < SCHED_PRI_NRESV; i++) 312 if (kseq->ksq_nice[i]) 313 printf("\t\t%d = %d\n", 314 i - SCHED_PRI_NHALF, kseq->ksq_nice[i]); 315} 316 317static __inline void 318kseq_runq_add(struct kseq *kseq, struct kse *ke) 319{ 320#ifdef SMP 321 if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) { 322 kseq->ksq_transferable++; 323 kseq->ksq_group->ksg_transferable++; 324 } 325#endif 326 runq_add(ke->ke_runq, ke); 327} 328 329static __inline void 330kseq_runq_rem(struct kseq *kseq, struct kse *ke) 331{ 332#ifdef SMP 333 if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) { 334 kseq->ksq_transferable--; 335 kseq->ksq_group->ksg_transferable--; 336 } 337#endif 338 runq_remove(ke->ke_runq, ke); 339} 340 341static void 342kseq_load_add(struct kseq *kseq, struct kse *ke) 343{ 344 int class; 345 mtx_assert(&sched_lock, MA_OWNED); 346 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 347 if (class == PRI_TIMESHARE) 348 kseq->ksq_load_timeshare++; 349 kseq->ksq_load++; 350#ifdef SMP 351 if (class != PRI_ITHD) 352 kseq->ksq_group->ksg_load++; 353#endif 354 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 355 CTR6(KTR_ULE, 356 "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))", 357 ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority, 358 ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin); 359 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 360 kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice); 361} 362 363static void 364kseq_load_rem(struct kseq *kseq, struct kse *ke) 365{ 366 int class; 367 mtx_assert(&sched_lock, MA_OWNED); 368 class = PRI_BASE(ke->ke_ksegrp->kg_pri_class); 369 if (class == PRI_TIMESHARE) 370 kseq->ksq_load_timeshare--; 371#ifdef SMP 372 if (class != PRI_ITHD) 373 kseq->ksq_group->ksg_load--; 374#endif 375 kseq->ksq_load--; 376 ke->ke_runq = NULL; 377 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) 378 kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice); 379} 380 381static void 382kseq_nice_add(struct kseq *kseq, int nice) 383{ 384 mtx_assert(&sched_lock, MA_OWNED); 385 /* Normalize to zero. */ 386 kseq->ksq_nice[nice + SCHED_PRI_NHALF]++; 387 if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1) 388 kseq->ksq_nicemin = nice; 389} 390 391static void 392kseq_nice_rem(struct kseq *kseq, int nice) 393{ 394 int n; 395 396 mtx_assert(&sched_lock, MA_OWNED); 397 /* Normalize to zero. */ 398 n = nice + SCHED_PRI_NHALF; 399 kseq->ksq_nice[n]--; 400 KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count.")); 401 402 /* 403 * If this wasn't the smallest nice value or there are more in 404 * this bucket we can just return. Otherwise we have to recalculate 405 * the smallest nice. 406 */ 407 if (nice != kseq->ksq_nicemin || 408 kseq->ksq_nice[n] != 0 || 409 kseq->ksq_load_timeshare == 0) 410 return; 411 412 for (; n < SCHED_PRI_NRESV; n++) 413 if (kseq->ksq_nice[n]) { 414 kseq->ksq_nicemin = n - SCHED_PRI_NHALF; 415 return; 416 } 417} 418 419#ifdef SMP 420/* 421 * sched_balance is a simple CPU load balancing algorithm. It operates by 422 * finding the least loaded and most loaded cpu and equalizing their load 423 * by migrating some processes. 424 * 425 * Dealing only with two CPUs at a time has two advantages. Firstly, most 426 * installations will only have 2 cpus. Secondly, load balancing too much at 427 * once can have an unpleasant effect on the system. The scheduler rarely has 428 * enough information to make perfect decisions. So this algorithm chooses 429 * algorithm simplicity and more gradual effects on load in larger systems. 430 * 431 * It could be improved by considering the priorities and slices assigned to 432 * each task prior to balancing them. There are many pathological cases with 433 * any approach and so the semi random algorithm below may work as well as any. 434 * 435 */ 436static void 437sched_balance(void *arg) 438{ 439 struct kseq_group *high; 440 struct kseq_group *low; 441 struct kseq_group *ksg; 442 int timo; 443 int cnt; 444 int i; 445 446 mtx_lock_spin(&sched_lock); 447 if (smp_started == 0) 448 goto out; 449 low = high = NULL; 450 i = random() % (ksg_maxid + 1); 451 for (cnt = 0; cnt <= ksg_maxid; cnt++) { 452 ksg = KSEQ_GROUP(i); 453 /* 454 * Find the CPU with the highest load that has some 455 * threads to transfer. 456 */ 457 if ((high == NULL || ksg->ksg_load > high->ksg_load) 458 && ksg->ksg_transferable) 459 high = ksg; 460 if (low == NULL || ksg->ksg_load < low->ksg_load) 461 low = ksg; 462 if (++i > ksg_maxid) 463 i = 0; 464 } 465 if (low != NULL && high != NULL && high != low) 466 sched_balance_pair(LIST_FIRST(&high->ksg_members), 467 LIST_FIRST(&low->ksg_members)); 468out: 469 mtx_unlock_spin(&sched_lock); 470 timo = random() % (hz * 2); 471 callout_reset(&kseq_lb_callout, timo, sched_balance, NULL); 472} 473 474static void 475sched_balance_groups(void *arg) 476{ 477 int timo; 478 int i; 479 480 mtx_lock_spin(&sched_lock); 481 if (smp_started) 482 for (i = 0; i <= ksg_maxid; i++) 483 sched_balance_group(KSEQ_GROUP(i)); 484 mtx_unlock_spin(&sched_lock); 485 timo = random() % (hz * 2); 486 callout_reset(&kseq_group_callout, timo, sched_balance_groups, NULL); 487} 488 489static void 490sched_balance_group(struct kseq_group *ksg) 491{ 492 struct kseq *kseq; 493 struct kseq *high; 494 struct kseq *low; 495 int load; 496 497 if (ksg->ksg_transferable == 0) 498 return; 499 low = NULL; 500 high = NULL; 501 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { 502 load = kseq->ksq_load; 503 if (kseq == KSEQ_CPU(0)) 504 load--; 505 if (high == NULL || load > high->ksq_load) 506 high = kseq; 507 if (low == NULL || load < low->ksq_load) 508 low = kseq; 509 } 510 if (high != NULL && low != NULL && high != low) 511 sched_balance_pair(high, low); 512} 513 514static void 515sched_balance_pair(struct kseq *high, struct kseq *low) 516{ 517 int transferable; 518 int high_load; 519 int low_load; 520 int move; 521 int diff; 522 int i; 523 524 /* 525 * If we're transfering within a group we have to use this specific 526 * kseq's transferable count, otherwise we can steal from other members 527 * of the group. 528 */ 529 if (high->ksq_group == low->ksq_group) { 530 transferable = high->ksq_transferable; 531 high_load = high->ksq_load; 532 low_load = low->ksq_load; 533 /* 534 * XXX If we encounter cpu 0 we must remember to reduce it's 535 * load by 1 to reflect the swi that is running the callout. 536 * At some point we should really fix load balancing of the 537 * swi and then this wont matter. 538 */ 539 if (high == KSEQ_CPU(0)) 540 high_load--; 541 if (low == KSEQ_CPU(0)) 542 low_load--; 543 } else { 544 transferable = high->ksq_group->ksg_transferable; 545 high_load = high->ksq_group->ksg_load; 546 low_load = low->ksq_group->ksg_load; 547 } 548 if (transferable == 0) 549 return; 550 /* 551 * Determine what the imbalance is and then adjust that to how many 552 * kses we actually have to give up (transferable). 553 */ 554 diff = high_load - low_load; 555 move = diff / 2; 556 if (diff & 0x1) 557 move++; 558 move = min(move, transferable); 559 for (i = 0; i < move; i++) 560 kseq_move(high, KSEQ_ID(low)); 561 return; 562} 563 564static void 565kseq_move(struct kseq *from, int cpu) 566{ 567 struct kseq *kseq; 568 struct kseq *to; 569 struct kse *ke; 570 571 kseq = from; 572 to = KSEQ_CPU(cpu); 573 ke = kseq_steal(kseq, 1); 574 if (ke == NULL) { 575 struct kseq_group *ksg; 576 577 ksg = kseq->ksq_group; 578 LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) { 579 if (kseq == from || kseq->ksq_transferable == 0) 580 continue; 581 ke = kseq_steal(kseq, 1); 582 break; 583 } 584 if (ke == NULL) 585 panic("kseq_move: No KSEs available with a " 586 "transferable count of %d\n", 587 ksg->ksg_transferable); 588 } 589 if (kseq == to) 590 return; 591 ke->ke_state = KES_THREAD; 592 kseq_runq_rem(kseq, ke); 593 kseq_load_rem(kseq, ke); 594 595 ke->ke_cpu = cpu; 596 kseq_notify(ke, cpu); 597} 598 599static int 600kseq_idled(struct kseq *kseq) 601{ 602 struct kseq_group *ksg; 603 struct kseq *steal; 604 struct kse *ke; 605 606 ksg = kseq->ksq_group; 607 /* 608 * If we're in a cpu group, try and steal kses from another cpu in 609 * the group before idling. 610 */ 611 if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) { 612 LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) { 613 if (steal == kseq || steal->ksq_transferable == 0) 614 continue; 615 ke = kseq_steal(steal, 0); 616 if (ke == NULL) 617 continue; 618 ke->ke_state = KES_THREAD; 619 kseq_runq_rem(steal, ke); 620 kseq_load_rem(steal, ke); 621 ke->ke_cpu = PCPU_GET(cpuid); 622 sched_add(ke->ke_thread); 623 return (0); 624 } 625 } 626 /* 627 * We only set the idled bit when all of the cpus in the group are 628 * idle. Otherwise we could get into a situation where a KSE bounces 629 * back and forth between two idle cores on seperate physical CPUs. 630 */ 631 ksg->ksg_idlemask |= PCPU_GET(cpumask); 632 if (ksg->ksg_idlemask != ksg->ksg_cpumask) 633 return (1); 634 atomic_set_int(&kseq_idle, ksg->ksg_mask); 635 return (1); 636} 637 638static void 639kseq_assign(struct kseq *kseq) 640{ 641 struct kse *nke; 642 struct kse *ke; 643 644 do { 645 (volatile struct kse *)ke = kseq->ksq_assigned; 646 } while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL)); 647 for (; ke != NULL; ke = nke) { 648 nke = ke->ke_assign; 649 ke->ke_flags &= ~KEF_ASSIGNED; 650 sched_add(ke->ke_thread); 651 } 652} 653 654static void 655kseq_notify(struct kse *ke, int cpu) 656{ 657 struct kseq *kseq; 658 struct thread *td; 659 struct pcpu *pcpu; 660 661 ke->ke_flags |= KEF_ASSIGNED; 662 663 kseq = KSEQ_CPU(cpu); 664 665 /* 666 * Place a KSE on another cpu's queue and force a resched. 667 */ 668 do { 669 (volatile struct kse *)ke->ke_assign = kseq->ksq_assigned; 670 } while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke)); 671 pcpu = pcpu_find(cpu); 672 td = pcpu->pc_curthread; 673 if (ke->ke_thread->td_priority < td->td_priority || 674 td == pcpu->pc_idlethread) { 675 td->td_flags |= TDF_NEEDRESCHED; 676 ipi_selected(1 << cpu, IPI_AST); 677 } 678} 679 680static struct kse * 681runq_steal(struct runq *rq) 682{ 683 struct rqhead *rqh; 684 struct rqbits *rqb; 685 struct kse *ke; 686 int word; 687 int bit; 688 689 mtx_assert(&sched_lock, MA_OWNED); 690 rqb = &rq->rq_status; 691 for (word = 0; word < RQB_LEN; word++) { 692 if (rqb->rqb_bits[word] == 0) 693 continue; 694 for (bit = 0; bit < RQB_BPW; bit++) { 695 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0) 696 continue; 697 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)]; 698 TAILQ_FOREACH(ke, rqh, ke_procq) { 699 if (KSE_CAN_MIGRATE(ke, 700 PRI_BASE(ke->ke_ksegrp->kg_pri_class))) 701 return (ke); 702 } 703 } 704 } 705 return (NULL); 706} 707 708static struct kse * 709kseq_steal(struct kseq *kseq, int stealidle) 710{ 711 struct kse *ke; 712 713 /* 714 * Steal from next first to try to get a non-interactive task that 715 * may not have run for a while. 716 */ 717 if ((ke = runq_steal(kseq->ksq_next)) != NULL) 718 return (ke); 719 if ((ke = runq_steal(kseq->ksq_curr)) != NULL) 720 return (ke); 721 if (stealidle) 722 return (runq_steal(&kseq->ksq_idle)); 723 return (NULL); 724} 725 726int 727kseq_transfer(struct kseq *kseq, struct kse *ke, int class) 728{ 729 struct kseq_group *ksg; 730 int cpu; 731 732 cpu = 0; 733 ksg = kseq->ksq_group; 734 735 /* 736 * XXX This ksg_transferable might work better if we were checking 737 * against a global group load. As it is now, this prevents us from 738 * transfering a thread from a group that is potentially bogged down 739 * with non transferable load. 740 */ 741 if (ksg->ksg_transferable > ksg->ksg_cpus && kseq_idle) { 742 /* 743 * Multiple cpus could find this bit simultaneously 744 * but the race shouldn't be terrible. 745 */ 746 cpu = ffs(kseq_idle); 747 if (cpu) 748 atomic_clear_int(&kseq_idle, 1 << (cpu - 1)); 749 } 750 /* 751 * If another cpu in this group has idled, assign a thread over 752 * to them after checking to see if there are idled groups. 753 */ 754 if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) { 755 cpu = ffs(ksg->ksg_idlemask); 756 if (cpu) 757 ksg->ksg_idlemask &= ~(1 << (cpu - 1)); 758 } 759 /* 760 * Now that we've found an idle CPU, migrate the thread. 761 */ 762 if (cpu) { 763 cpu--; 764 ke->ke_cpu = cpu; 765 ke->ke_runq = NULL; 766 kseq_notify(ke, cpu); 767 return (1); 768 } 769 return (0); 770} 771 772#endif /* SMP */ 773 774/* 775 * Pick the highest priority task we have and return it. 776 */ 777 778static struct kse * 779kseq_choose(struct kseq *kseq) 780{ 781 struct kse *ke; 782 struct runq *swap; 783 784 mtx_assert(&sched_lock, MA_OWNED); 785 swap = NULL; 786 787 for (;;) { 788 ke = runq_choose(kseq->ksq_curr); 789 if (ke == NULL) { 790 /* 791 * We already swaped once and didn't get anywhere. 792 */ 793 if (swap) 794 break; 795 swap = kseq->ksq_curr; 796 kseq->ksq_curr = kseq->ksq_next; 797 kseq->ksq_next = swap; 798 continue; 799 } 800 /* 801 * If we encounter a slice of 0 the kse is in a 802 * TIMESHARE kse group and its nice was too far out 803 * of the range that receives slices. 804 */ 805 if (ke->ke_slice == 0) { 806 runq_remove(ke->ke_runq, ke); 807 sched_slice(ke); 808 ke->ke_runq = kseq->ksq_next; 809 runq_add(ke->ke_runq, ke); 810 continue; 811 } 812 return (ke); 813 } 814 815 return (runq_choose(&kseq->ksq_idle)); 816} 817 818static void 819kseq_setup(struct kseq *kseq) 820{ 821 runq_init(&kseq->ksq_timeshare[0]); 822 runq_init(&kseq->ksq_timeshare[1]); 823 runq_init(&kseq->ksq_idle); 824 kseq->ksq_curr = &kseq->ksq_timeshare[0]; 825 kseq->ksq_next = &kseq->ksq_timeshare[1]; 826 kseq->ksq_load = 0; 827 kseq->ksq_load_timeshare = 0; 828} 829 830static void 831sched_setup(void *dummy) 832{ 833#ifdef SMP 834 int balance_groups; 835 int i; 836#endif 837 838 slice_min = (hz/100); /* 10ms */ 839 slice_max = (hz/7); /* ~140ms */ 840 841#ifdef SMP 842 balance_groups = 0; 843 /* 844 * Initialize the kseqs. 845 */ 846 for (i = 0; i < MAXCPU; i++) { 847 struct kseq *ksq; 848 849 ksq = &kseq_cpu[i]; 850 ksq->ksq_assigned = NULL; 851 kseq_setup(&kseq_cpu[i]); 852 } 853 if (smp_topology == NULL) { 854 struct kseq_group *ksg; 855 struct kseq *ksq; 856 857 for (i = 0; i < MAXCPU; i++) { 858 ksq = &kseq_cpu[i]; 859 ksg = &kseq_groups[i]; 860 /* 861 * Setup a kse group with one member. 862 */ 863 ksq->ksq_transferable = 0; 864 ksq->ksq_group = ksg; 865 ksg->ksg_cpus = 1; 866 ksg->ksg_idlemask = 0; 867 ksg->ksg_cpumask = ksg->ksg_mask = 1 << i; 868 ksg->ksg_load = 0; 869 ksg->ksg_transferable = 0; 870 LIST_INIT(&ksg->ksg_members); 871 LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings); 872 } 873 } else { 874 struct kseq_group *ksg; 875 struct cpu_group *cg; 876 int j; 877 878 for (i = 0; i < smp_topology->ct_count; i++) { 879 cg = &smp_topology->ct_group[i]; 880 ksg = &kseq_groups[i]; 881 /* 882 * Initialize the group. 883 */ 884 ksg->ksg_idlemask = 0; 885 ksg->ksg_load = 0; 886 ksg->ksg_transferable = 0; 887 ksg->ksg_cpus = cg->cg_count; 888 ksg->ksg_cpumask = cg->cg_mask; 889 LIST_INIT(&ksg->ksg_members); 890 /* 891 * Find all of the group members and add them. 892 */ 893 for (j = 0; j < MAXCPU; j++) { 894 if ((cg->cg_mask & (1 << j)) != 0) { 895 if (ksg->ksg_mask == 0) 896 ksg->ksg_mask = 1 << j; 897 kseq_cpu[j].ksq_transferable = 0; 898 kseq_cpu[j].ksq_group = ksg; 899 LIST_INSERT_HEAD(&ksg->ksg_members, 900 &kseq_cpu[j], ksq_siblings); 901 } 902 } 903 if (ksg->ksg_cpus > 1) 904 balance_groups = 1; 905 } 906 ksg_maxid = smp_topology->ct_count - 1; 907 } 908 callout_init(&kseq_lb_callout, CALLOUT_MPSAFE); 909 callout_init(&kseq_group_callout, CALLOUT_MPSAFE); 910 sched_balance(NULL); 911 /* 912 * Stagger the group and global load balancer so they do not 913 * interfere with each other. 914 */ 915 if (balance_groups) 916 callout_reset(&kseq_group_callout, hz / 2, 917 sched_balance_groups, NULL); 918#else 919 kseq_setup(KSEQ_SELF()); 920#endif 921 mtx_lock_spin(&sched_lock); 922 kseq_load_add(KSEQ_SELF(), &kse0); 923 mtx_unlock_spin(&sched_lock); 924} 925 926/* 927 * Scale the scheduling priority according to the "interactivity" of this 928 * process. 929 */ 930static void 931sched_priority(struct ksegrp *kg) 932{ 933 int pri; 934 935 if (kg->kg_pri_class != PRI_TIMESHARE) 936 return; 937 938 pri = SCHED_PRI_INTERACT(sched_interact_score(kg)); 939 pri += SCHED_PRI_BASE; 940 pri += kg->kg_nice; 941 942 if (pri > PRI_MAX_TIMESHARE) 943 pri = PRI_MAX_TIMESHARE; 944 else if (pri < PRI_MIN_TIMESHARE) 945 pri = PRI_MIN_TIMESHARE; 946 947 kg->kg_user_pri = pri; 948 949 return; 950} 951 952/* 953 * Calculate a time slice based on the properties of the kseg and the runq 954 * that we're on. This is only for PRI_TIMESHARE ksegrps. 955 */ 956static void 957sched_slice(struct kse *ke) 958{ 959 struct kseq *kseq; 960 struct ksegrp *kg; 961 962 kg = ke->ke_ksegrp; 963 kseq = KSEQ_CPU(ke->ke_cpu); 964 965 /* 966 * Rationale: 967 * KSEs in interactive ksegs get the minimum slice so that we 968 * quickly notice if it abuses its advantage. 969 * 970 * KSEs in non-interactive ksegs are assigned a slice that is 971 * based on the ksegs nice value relative to the least nice kseg 972 * on the run queue for this cpu. 973 * 974 * If the KSE is less nice than all others it gets the maximum 975 * slice and other KSEs will adjust their slice relative to 976 * this when they first expire. 977 * 978 * There is 20 point window that starts relative to the least 979 * nice kse on the run queue. Slice size is determined by 980 * the kse distance from the last nice ksegrp. 981 * 982 * If the kse is outside of the window it will get no slice 983 * and will be reevaluated each time it is selected on the 984 * run queue. The exception to this is nice 0 ksegs when 985 * a nice -20 is running. They are always granted a minimum 986 * slice. 987 */ 988 if (!SCHED_INTERACTIVE(kg)) { 989 int nice; 990 991 nice = kg->kg_nice + (0 - kseq->ksq_nicemin); 992 if (kseq->ksq_load_timeshare == 0 || 993 kg->kg_nice < kseq->ksq_nicemin) 994 ke->ke_slice = SCHED_SLICE_MAX; 995 else if (nice <= SCHED_SLICE_NTHRESH) 996 ke->ke_slice = SCHED_SLICE_NICE(nice); 997 else if (kg->kg_nice == 0) 998 ke->ke_slice = SCHED_SLICE_MIN; 999 else 1000 ke->ke_slice = 0; 1001 } else 1002 ke->ke_slice = SCHED_SLICE_MIN; 1003 1004 CTR6(KTR_ULE, 1005 "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)", 1006 ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin, 1007 kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg)); 1008 1009 return; 1010} 1011 1012/* 1013 * This routine enforces a maximum limit on the amount of scheduling history 1014 * kept. It is called after either the slptime or runtime is adjusted. 1015 * This routine will not operate correctly when slp or run times have been 1016 * adjusted to more than double their maximum. 1017 */ 1018static void 1019sched_interact_update(struct ksegrp *kg) 1020{ 1021 int sum; 1022 1023 sum = kg->kg_runtime + kg->kg_slptime; 1024 if (sum < SCHED_SLP_RUN_MAX) 1025 return; 1026 /* 1027 * If we have exceeded by more than 1/5th then the algorithm below 1028 * will not bring us back into range. Dividing by two here forces 1029 * us into the range of [3/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX] 1030 */ 1031 if (sum > (SCHED_INTERACT_MAX / 5) * 6) { 1032 kg->kg_runtime /= 2; 1033 kg->kg_slptime /= 2; 1034 return; 1035 } 1036 kg->kg_runtime = (kg->kg_runtime / 5) * 4; 1037 kg->kg_slptime = (kg->kg_slptime / 5) * 4; 1038} 1039 1040static void 1041sched_interact_fork(struct ksegrp *kg) 1042{ 1043 int ratio; 1044 int sum; 1045 1046 sum = kg->kg_runtime + kg->kg_slptime; 1047 if (sum > SCHED_SLP_RUN_FORK) { 1048 ratio = sum / SCHED_SLP_RUN_FORK; 1049 kg->kg_runtime /= ratio; 1050 kg->kg_slptime /= ratio; 1051 } 1052} 1053 1054static int 1055sched_interact_score(struct ksegrp *kg) 1056{ 1057 int div; 1058 1059 if (kg->kg_runtime > kg->kg_slptime) { 1060 div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF); 1061 return (SCHED_INTERACT_HALF + 1062 (SCHED_INTERACT_HALF - (kg->kg_slptime / div))); 1063 } if (kg->kg_slptime > kg->kg_runtime) { 1064 div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF); 1065 return (kg->kg_runtime / div); 1066 } 1067 1068 /* 1069 * This can happen if slptime and runtime are 0. 1070 */ 1071 return (0); 1072 1073} 1074 1075/* 1076 * This is only somewhat accurate since given many processes of the same 1077 * priority they will switch when their slices run out, which will be 1078 * at most SCHED_SLICE_MAX. 1079 */ 1080int 1081sched_rr_interval(void) 1082{ 1083 return (SCHED_SLICE_MAX); 1084} 1085 1086static void 1087sched_pctcpu_update(struct kse *ke) 1088{ 1089 /* 1090 * Adjust counters and watermark for pctcpu calc. 1091 */ 1092 if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) { 1093 /* 1094 * Shift the tick count out so that the divide doesn't 1095 * round away our results. 1096 */ 1097 ke->ke_ticks <<= 10; 1098 ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) * 1099 SCHED_CPU_TICKS; 1100 ke->ke_ticks >>= 10; 1101 } else 1102 ke->ke_ticks = 0; 1103 ke->ke_ltick = ticks; 1104 ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS; 1105} 1106 1107void 1108sched_prio(struct thread *td, u_char prio) 1109{ 1110 struct kse *ke; 1111 1112 ke = td->td_kse; 1113 mtx_assert(&sched_lock, MA_OWNED); 1114 if (TD_ON_RUNQ(td)) { 1115 /* 1116 * If the priority has been elevated due to priority 1117 * propagation, we may have to move ourselves to a new 1118 * queue. We still call adjustrunqueue below in case kse 1119 * needs to fix things up. 1120 */ 1121 if (prio < td->td_priority && ke && 1122 (ke->ke_flags & KEF_ASSIGNED) == 0 && 1123 ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) { 1124 runq_remove(ke->ke_runq, ke); 1125 ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr; 1126 runq_add(ke->ke_runq, ke); 1127 } 1128 adjustrunqueue(td, prio); 1129 } else 1130 td->td_priority = prio; 1131} 1132 1133void 1134sched_switch(struct thread *td) 1135{ 1136 struct thread *newtd; 1137 struct kse *ke; 1138 1139 mtx_assert(&sched_lock, MA_OWNED); 1140 1141 ke = td->td_kse; 1142 1143 td->td_last_kse = ke; 1144 td->td_lastcpu = td->td_oncpu; 1145 td->td_oncpu = NOCPU; 1146 td->td_flags &= ~TDF_NEEDRESCHED; 1147 1148 /* 1149 * If the KSE has been assigned it may be in the process of switching 1150 * to the new cpu. This is the case in sched_bind(). 1151 */ 1152 if ((ke->ke_flags & KEF_ASSIGNED) == 0) { 1153 if (TD_IS_RUNNING(td)) { 1154 if (td->td_proc->p_flag & P_SA) { 1155 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1156 setrunqueue(td); 1157 } else 1158 kseq_runq_add(KSEQ_SELF(), ke); 1159 } else { 1160 if (ke->ke_runq) 1161 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1162 /* 1163 * We will not be on the run queue. So we must be 1164 * sleeping or similar. 1165 */ 1166 if (td->td_proc->p_flag & P_SA) 1167 kse_reassign(ke); 1168 } 1169 } 1170 newtd = choosethread(); 1171 if (td != newtd) 1172 cpu_switch(td, newtd); 1173 sched_lock.mtx_lock = (uintptr_t)td; 1174 1175 td->td_oncpu = PCPU_GET(cpuid); 1176} 1177 1178void 1179sched_nice(struct ksegrp *kg, int nice) 1180{ 1181 struct kse *ke; 1182 struct thread *td; 1183 struct kseq *kseq; 1184 1185 PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED); 1186 mtx_assert(&sched_lock, MA_OWNED); 1187 /* 1188 * We need to adjust the nice counts for running KSEs. 1189 */ 1190 if (kg->kg_pri_class == PRI_TIMESHARE) 1191 FOREACH_KSE_IN_GROUP(kg, ke) { 1192 if (ke->ke_runq == NULL) 1193 continue; 1194 kseq = KSEQ_CPU(ke->ke_cpu); 1195 kseq_nice_rem(kseq, kg->kg_nice); 1196 kseq_nice_add(kseq, nice); 1197 } 1198 kg->kg_nice = nice; 1199 sched_priority(kg); 1200 FOREACH_THREAD_IN_GROUP(kg, td) 1201 td->td_flags |= TDF_NEEDRESCHED; 1202} 1203 1204void 1205sched_sleep(struct thread *td, u_char prio) 1206{ 1207 mtx_assert(&sched_lock, MA_OWNED); 1208 1209 td->td_slptime = ticks; 1210 td->td_priority = prio; 1211 1212 CTR2(KTR_ULE, "sleep kse %p (tick: %d)", 1213 td->td_kse, td->td_slptime); 1214} 1215 1216void 1217sched_wakeup(struct thread *td) 1218{ 1219 mtx_assert(&sched_lock, MA_OWNED); 1220 1221 /* 1222 * Let the kseg know how long we slept for. This is because process 1223 * interactivity behavior is modeled in the kseg. 1224 */ 1225 if (td->td_slptime) { 1226 struct ksegrp *kg; 1227 int hzticks; 1228 1229 kg = td->td_ksegrp; 1230 hzticks = (ticks - td->td_slptime) << 10; 1231 if (hzticks >= SCHED_SLP_RUN_MAX) { 1232 kg->kg_slptime = SCHED_SLP_RUN_MAX; 1233 kg->kg_runtime = 1; 1234 } else { 1235 kg->kg_slptime += hzticks; 1236 sched_interact_update(kg); 1237 } 1238 sched_priority(kg); 1239 if (td->td_kse) 1240 sched_slice(td->td_kse); 1241 CTR2(KTR_ULE, "wakeup kse %p (%d ticks)", 1242 td->td_kse, hzticks); 1243 td->td_slptime = 0; 1244 } 1245 setrunqueue(td); 1246} 1247 1248/* 1249 * Penalize the parent for creating a new child and initialize the child's 1250 * priority. 1251 */ 1252void 1253sched_fork(struct proc *p, struct proc *p1) 1254{ 1255 1256 mtx_assert(&sched_lock, MA_OWNED); 1257 1258 sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1)); 1259 sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1)); 1260 sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1)); 1261} 1262 1263void 1264sched_fork_kse(struct kse *ke, struct kse *child) 1265{ 1266 1267 child->ke_slice = 1; /* Attempt to quickly learn interactivity. */ 1268 child->ke_cpu = ke->ke_cpu; 1269 child->ke_runq = NULL; 1270 1271 /* Grab our parents cpu estimation information. */ 1272 child->ke_ticks = ke->ke_ticks; 1273 child->ke_ltick = ke->ke_ltick; 1274 child->ke_ftick = ke->ke_ftick; 1275} 1276 1277void 1278sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1279{ 1280 PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED); 1281 1282 child->kg_slptime = kg->kg_slptime; 1283 child->kg_runtime = kg->kg_runtime; 1284 child->kg_user_pri = kg->kg_user_pri; 1285 child->kg_nice = kg->kg_nice; 1286 sched_interact_fork(child); 1287 kg->kg_runtime += tickincr << 10; 1288 sched_interact_update(kg); 1289 1290 CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)", 1291 kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime, 1292 child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime); 1293} 1294 1295void 1296sched_fork_thread(struct thread *td, struct thread *child) 1297{ 1298} 1299 1300void 1301sched_class(struct ksegrp *kg, int class) 1302{ 1303 struct kseq *kseq; 1304 struct kse *ke; 1305 int nclass; 1306 int oclass; 1307 1308 mtx_assert(&sched_lock, MA_OWNED); 1309 if (kg->kg_pri_class == class) 1310 return; 1311 1312 nclass = PRI_BASE(class); 1313 oclass = PRI_BASE(kg->kg_pri_class); 1314 FOREACH_KSE_IN_GROUP(kg, ke) { 1315 if (ke->ke_state != KES_ONRUNQ && 1316 ke->ke_state != KES_THREAD) 1317 continue; 1318 kseq = KSEQ_CPU(ke->ke_cpu); 1319 1320#ifdef SMP 1321 /* 1322 * On SMP if we're on the RUNQ we must adjust the transferable 1323 * count because could be changing to or from an interrupt 1324 * class. 1325 */ 1326 if (ke->ke_state == KES_ONRUNQ) { 1327 if (KSE_CAN_MIGRATE(ke, oclass)) { 1328 kseq->ksq_transferable--; 1329 kseq->ksq_group->ksg_transferable--; 1330 } 1331 if (KSE_CAN_MIGRATE(ke, nclass)) { 1332 kseq->ksq_transferable++; 1333 kseq->ksq_group->ksg_transferable++; 1334 } 1335 } 1336#endif 1337 if (oclass == PRI_TIMESHARE) { 1338 kseq->ksq_load_timeshare--; 1339 kseq_nice_rem(kseq, kg->kg_nice); 1340 } 1341 if (nclass == PRI_TIMESHARE) { 1342 kseq->ksq_load_timeshare++; 1343 kseq_nice_add(kseq, kg->kg_nice); 1344 } 1345 } 1346 1347 kg->kg_pri_class = class; 1348} 1349 1350/* 1351 * Return some of the child's priority and interactivity to the parent. 1352 */ 1353void 1354sched_exit(struct proc *p, struct proc *child) 1355{ 1356 mtx_assert(&sched_lock, MA_OWNED); 1357 sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child)); 1358 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child)); 1359} 1360 1361void 1362sched_exit_kse(struct kse *ke, struct kse *child) 1363{ 1364 kseq_load_rem(KSEQ_CPU(child->ke_cpu), child); 1365} 1366 1367void 1368sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child) 1369{ 1370 /* kg->kg_slptime += child->kg_slptime; */ 1371 kg->kg_runtime += child->kg_runtime; 1372 sched_interact_update(kg); 1373} 1374 1375void 1376sched_exit_thread(struct thread *td, struct thread *child) 1377{ 1378} 1379 1380void 1381sched_clock(struct thread *td) 1382{ 1383 struct kseq *kseq; 1384 struct ksegrp *kg; 1385 struct kse *ke; 1386 1387 /* 1388 * sched_setup() apparently happens prior to stathz being set. We 1389 * need to resolve the timers earlier in the boot so we can avoid 1390 * calculating this here. 1391 */ 1392 if (realstathz == 0) { 1393 realstathz = stathz ? stathz : hz; 1394 tickincr = hz / realstathz; 1395 /* 1396 * XXX This does not work for values of stathz that are much 1397 * larger than hz. 1398 */ 1399 if (tickincr == 0) 1400 tickincr = 1; 1401 } 1402 1403 ke = td->td_kse; 1404 kg = ke->ke_ksegrp; 1405 1406 mtx_assert(&sched_lock, MA_OWNED); 1407 KASSERT((td != NULL), ("schedclock: null thread pointer")); 1408 1409 /* Adjust ticks for pctcpu */ 1410 ke->ke_ticks++; 1411 ke->ke_ltick = ticks; 1412 1413 /* Go up to one second beyond our max and then trim back down */ 1414 if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick) 1415 sched_pctcpu_update(ke); 1416 1417 if (td->td_flags & TDF_IDLETD) 1418 return; 1419 1420 CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)", 1421 ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10); 1422 /* 1423 * We only do slicing code for TIMESHARE ksegrps. 1424 */ 1425 if (kg->kg_pri_class != PRI_TIMESHARE) 1426 return; 1427 /* 1428 * We used a tick charge it to the ksegrp so that we can compute our 1429 * interactivity. 1430 */ 1431 kg->kg_runtime += tickincr << 10; 1432 sched_interact_update(kg); 1433 1434 /* 1435 * We used up one time slice. 1436 */ 1437 if (--ke->ke_slice > 0) 1438 return; 1439 /* 1440 * We're out of time, recompute priorities and requeue. 1441 */ 1442 kseq = KSEQ_SELF(); 1443 kseq_load_rem(kseq, ke); 1444 sched_priority(kg); 1445 sched_slice(ke); 1446 if (SCHED_CURR(kg, ke)) 1447 ke->ke_runq = kseq->ksq_curr; 1448 else 1449 ke->ke_runq = kseq->ksq_next; 1450 kseq_load_add(kseq, ke); 1451 td->td_flags |= TDF_NEEDRESCHED; 1452} 1453 1454int 1455sched_runnable(void) 1456{ 1457 struct kseq *kseq; 1458 int load; 1459 1460 load = 1; 1461 1462 kseq = KSEQ_SELF(); 1463#ifdef SMP 1464 if (kseq->ksq_assigned) { 1465 mtx_lock_spin(&sched_lock); 1466 kseq_assign(kseq); 1467 mtx_unlock_spin(&sched_lock); 1468 } 1469#endif 1470 if ((curthread->td_flags & TDF_IDLETD) != 0) { 1471 if (kseq->ksq_load > 0) 1472 goto out; 1473 } else 1474 if (kseq->ksq_load - 1 > 0) 1475 goto out; 1476 load = 0; 1477out: 1478 return (load); 1479} 1480 1481void 1482sched_userret(struct thread *td) 1483{ 1484 struct ksegrp *kg; 1485 1486 kg = td->td_ksegrp; 1487 1488 if (td->td_priority != kg->kg_user_pri) { 1489 mtx_lock_spin(&sched_lock); 1490 td->td_priority = kg->kg_user_pri; 1491 mtx_unlock_spin(&sched_lock); 1492 } 1493} 1494 1495struct kse * 1496sched_choose(void) 1497{ 1498 struct kseq *kseq; 1499 struct kse *ke; 1500 1501 mtx_assert(&sched_lock, MA_OWNED); 1502 kseq = KSEQ_SELF(); 1503#ifdef SMP 1504restart: 1505 if (kseq->ksq_assigned) 1506 kseq_assign(kseq); 1507#endif 1508 ke = kseq_choose(kseq); 1509 if (ke) { 1510#ifdef SMP 1511 if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) 1512 if (kseq_idled(kseq) == 0) 1513 goto restart; 1514#endif 1515 kseq_runq_rem(kseq, ke); 1516 ke->ke_state = KES_THREAD; 1517 1518 if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) { 1519 CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)", 1520 ke, ke->ke_runq, ke->ke_slice, 1521 ke->ke_thread->td_priority); 1522 } 1523 return (ke); 1524 } 1525#ifdef SMP 1526 if (kseq_idled(kseq) == 0) 1527 goto restart; 1528#endif 1529 return (NULL); 1530} 1531 1532void 1533sched_add(struct thread *td) 1534{ 1535 struct kseq *kseq; 1536 struct ksegrp *kg; 1537 struct kse *ke; 1538 int class; 1539 1540 mtx_assert(&sched_lock, MA_OWNED); 1541 ke = td->td_kse; 1542 kg = td->td_ksegrp; 1543 if (ke->ke_flags & KEF_ASSIGNED) 1544 return; 1545 kseq = KSEQ_SELF(); 1546 KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE")); 1547 KASSERT((ke->ke_thread->td_kse != NULL), 1548 ("sched_add: No KSE on thread")); 1549 KASSERT(ke->ke_state != KES_ONRUNQ, 1550 ("sched_add: kse %p (%s) already in run queue", ke, 1551 ke->ke_proc->p_comm)); 1552 KASSERT(ke->ke_proc->p_sflag & PS_INMEM, 1553 ("sched_add: process swapped out")); 1554 KASSERT(ke->ke_runq == NULL, 1555 ("sched_add: KSE %p is still assigned to a run queue", ke)); 1556 1557 class = PRI_BASE(kg->kg_pri_class); 1558 switch (class) { 1559 case PRI_ITHD: 1560 case PRI_REALTIME: 1561 ke->ke_runq = kseq->ksq_curr; 1562 ke->ke_slice = SCHED_SLICE_MAX; 1563 ke->ke_cpu = PCPU_GET(cpuid); 1564 break; 1565 case PRI_TIMESHARE: 1566 if (SCHED_CURR(kg, ke)) 1567 ke->ke_runq = kseq->ksq_curr; 1568 else 1569 ke->ke_runq = kseq->ksq_next; 1570 break; 1571 case PRI_IDLE: 1572 /* 1573 * This is for priority prop. 1574 */ 1575 if (ke->ke_thread->td_priority < PRI_MIN_IDLE) 1576 ke->ke_runq = kseq->ksq_curr; 1577 else 1578 ke->ke_runq = &kseq->ksq_idle; 1579 ke->ke_slice = SCHED_SLICE_MIN; 1580 break; 1581 default: 1582 panic("Unknown pri class."); 1583 break; 1584 } 1585#ifdef SMP 1586 if (ke->ke_cpu != PCPU_GET(cpuid)) { 1587 kseq_notify(ke, ke->ke_cpu); 1588 return; 1589 } 1590 /* 1591 * If there are any idle groups, give them our extra load. The 1592 * threshold at which we start to reassign kses has a large impact 1593 * on the overall performance of the system. Tuned too high and 1594 * some CPUs may idle. Too low and there will be excess migration 1595 * and context swiches. 1596 */ 1597 if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class)) 1598 if (kseq_transfer(kseq, ke, class)) 1599 return; 1600 if ((class == PRI_TIMESHARE || class == PRI_REALTIME) && 1601 (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) { 1602 /* 1603 * Check to see if our group is unidling, and if so, remove it 1604 * from the global idle mask. 1605 */ 1606 if (kseq->ksq_group->ksg_idlemask == 1607 kseq->ksq_group->ksg_cpumask) 1608 atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask); 1609 /* 1610 * Now remove ourselves from the group specific idle mask. 1611 */ 1612 kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask); 1613 } 1614#endif 1615 if (td->td_priority < curthread->td_priority) 1616 curthread->td_flags |= TDF_NEEDRESCHED; 1617 1618 ke->ke_ksegrp->kg_runq_kses++; 1619 ke->ke_state = KES_ONRUNQ; 1620 1621 kseq_runq_add(kseq, ke); 1622 kseq_load_add(kseq, ke); 1623} 1624 1625void 1626sched_rem(struct thread *td) 1627{ 1628 struct kseq *kseq; 1629 struct kse *ke; 1630 1631 ke = td->td_kse; 1632 /* 1633 * It is safe to just return here because sched_rem() is only ever 1634 * used in places where we're immediately going to add the 1635 * kse back on again. In that case it'll be added with the correct 1636 * thread and priority when the caller drops the sched_lock. 1637 */ 1638 if (ke->ke_flags & KEF_ASSIGNED) 1639 return; 1640 mtx_assert(&sched_lock, MA_OWNED); 1641 KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); 1642 1643 ke->ke_state = KES_THREAD; 1644 ke->ke_ksegrp->kg_runq_kses--; 1645 kseq = KSEQ_CPU(ke->ke_cpu); 1646 kseq_runq_rem(kseq, ke); 1647 kseq_load_rem(kseq, ke); 1648} 1649 1650fixpt_t 1651sched_pctcpu(struct thread *td) 1652{ 1653 fixpt_t pctcpu; 1654 struct kse *ke; 1655 1656 pctcpu = 0; 1657 ke = td->td_kse; 1658 if (ke == NULL) 1659 return (0); 1660 1661 mtx_lock_spin(&sched_lock); 1662 if (ke->ke_ticks) { 1663 int rtick; 1664 1665 /* 1666 * Don't update more frequently than twice a second. Allowing 1667 * this causes the cpu usage to decay away too quickly due to 1668 * rounding errors. 1669 */ 1670 if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick || 1671 ke->ke_ltick < (ticks - (hz / 2))) 1672 sched_pctcpu_update(ke); 1673 /* How many rtick per second ? */ 1674 rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS); 1675 pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT; 1676 } 1677 1678 ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick; 1679 mtx_unlock_spin(&sched_lock); 1680 1681 return (pctcpu); 1682} 1683 1684void 1685sched_bind(struct thread *td, int cpu) 1686{ 1687 struct kse *ke; 1688 1689 mtx_assert(&sched_lock, MA_OWNED); 1690 ke = td->td_kse; 1691 ke->ke_flags |= KEF_BOUND; 1692#ifdef SMP 1693 if (PCPU_GET(cpuid) == cpu) 1694 return; 1695 /* sched_rem without the runq_remove */ 1696 ke->ke_state = KES_THREAD; 1697 ke->ke_ksegrp->kg_runq_kses--; 1698 kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke); 1699 ke->ke_cpu = cpu; 1700 kseq_notify(ke, cpu); 1701 /* When we return from mi_switch we'll be on the correct cpu. */ 1702 td->td_proc->p_stats->p_ru.ru_nvcsw++; 1703 mi_switch(); 1704#endif 1705} 1706 1707void 1708sched_unbind(struct thread *td) 1709{ 1710 mtx_assert(&sched_lock, MA_OWNED); 1711 td->td_kse->ke_flags &= ~KEF_BOUND; 1712} 1713 1714int 1715sched_sizeof_kse(void) 1716{ 1717 return (sizeof(struct kse) + sizeof(struct ke_sched)); 1718} 1719 1720int 1721sched_sizeof_ksegrp(void) 1722{ 1723 return (sizeof(struct ksegrp) + sizeof(struct kg_sched)); 1724} 1725 1726int 1727sched_sizeof_proc(void) 1728{ 1729 return (sizeof(struct proc)); 1730} 1731 1732int 1733sched_sizeof_thread(void) 1734{ 1735 return (sizeof(struct thread) + sizeof(struct td_sched)); 1736} 1737