sched_ule.c revision 109971
/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/sched_ule.c 109971 2003-01-28 09:30:17Z jeff $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
        int             ske_slice;
        struct runq     *ske_runq;
        /* The following variables are only used for pctcpu calculation */
        int             ske_ltick;      /* Last tick that we were running on */
        int             ske_ftick;      /* First tick that we were running on */
        int             ske_ticks;      /* Tick count */
};
#define ke_slice        ke_sched->ske_slice
#define ke_runq         ke_sched->ske_runq
#define ke_ltick        ke_sched->ske_ltick
#define ke_ftick        ke_sched->ske_ftick
#define ke_ticks        ke_sched->ske_ticks

struct kg_sched {
        int     skg_slptime;
};
#define kg_slptime      kg_sched->skg_slptime

struct td_sched {
        int     std_slptime;
};
#define td_slptime      td_sched->std_slptime

struct ke_sched ke_sched;
struct kg_sched kg_sched;
struct td_sched td_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * This priority range has 20 priorities on either end that are reachable
 * only through nice values.
 */
#define SCHED_PRI_NRESV 40
#define SCHED_PRI_RANGE ((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1) - \
    SCHED_PRI_NRESV)
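/*
 * Worked example (illustrative; assumes the stock priority layout where
 * PRI_MIN_TIMESHARE is 160 and PRI_MAX_TIMESHARE is 223): the timeshare
 * class spans 223 - 160 + 1 = 64 priorities, so SCHED_PRI_RANGE is
 * 64 - 40 = 24, leaving the 20 priorities at each end of the class
 * reachable only via a non-zero nice value.
 */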
/*
 * These determine how sleep time affects the priority of a process.
 *
 * SLP_MAX:     Maximum amount of accrued sleep time.
 * SLP_SCALE:   Scale the number of ticks slept across the dynamic priority
 *              range.
 * SLP_TOPRI:   Convert a number of ticks slept into a priority value.
 * SLP_DECAY:   Reduce the sleep time to 50% for every granted slice.
 */
#define SCHED_SLP_MAX   (hz * 2)
#define SCHED_SLP_SCALE(slp)    (((slp) * SCHED_PRI_RANGE) / SCHED_SLP_MAX)
#define SCHED_SLP_TOPRI(slp)    (SCHED_PRI_RANGE - SCHED_SLP_SCALE((slp)) + \
    SCHED_PRI_NRESV / 2)
#define SCHED_SLP_DECAY(slp)    ((slp) / 2)     /* XXX Multiple kses break */

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:   Minimum time slice granted, in units of ticks.
 * SLICE_MAX:   Maximum time slice granted.
 * SLICE_RANGE: Range of available time slices scaled by hz.
 * SLICE_SCALE: The number of slices granted per unit of pri or slp.
 * PRI_TOSLICE: Compute a slice size that is proportional to the priority.
 * SLP_TOSLICE: Compute a slice size that is inversely proportional to the
 *              amount of time slept. (smaller slices for interactive ksegs)
 * PRI_COMP:    This determines what fraction of the actual slice comes from
 *              the slice size computed from the priority.
 * SLP_COMP:    This determines what fraction of the actual slice comes from
 *              the slice size computed from the sleep time.
 */
#define SCHED_SLICE_MIN (hz / 100)
#define SCHED_SLICE_MAX (hz / 10)
#define SCHED_SLICE_RANGE       (SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define SCHED_SLICE_SCALE(val, max)     (((val) * SCHED_SLICE_RANGE) / (max))
#define SCHED_PRI_TOSLICE(pri) \
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((pri), SCHED_PRI_RANGE))
#define SCHED_SLP_TOSLICE(slp) \
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((slp), SCHED_SLP_MAX))
#define SCHED_SLP_COMP(slice)   (((slice) / 5) * 3)     /* 60% */
#define SCHED_PRI_COMP(slice)   (((slice) / 5) * 2)     /* 40% */
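/*
 * Worked example for the slice macros (illustrative; assumes hz == 100
 * and SCHED_PRI_RANGE == 24): SCHED_SLICE_MIN is 1 tick and
 * SCHED_SLICE_MAX is 10.  A kseg in the middle of both scales, with a
 * kg_slptime of 100 and a relative priority of 12, gets
 * SCHED_SLP_TOSLICE(100) = 10 - (100 * 10) / 200 = 5 and
 * SCHED_PRI_TOSLICE(12) = 10 - (12 * 10) / 24 = 5, for a final slice of
 * SCHED_SLP_COMP(5) + SCHED_PRI_COMP(5) = 3 + 2 = 5 ticks.
 */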
/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 */
#define SCHED_CURR(kg)  ((kg)->kg_slptime > (hz / 4) || \
    (kg)->kg_pri_class != PRI_TIMESHARE)

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:      Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:     Number of hz ticks to average the cpu usage across.
 */
#define SCHED_CPU_TIME  60
#define SCHED_CPU_TICKS (hz * SCHED_CPU_TIME)

/*
 * kseq - a pair of runqs per processor.
 */
struct kseq {
        struct runq     ksq_runqs[2];
        struct runq     *ksq_curr;      /* Queue drained this round */
        struct runq     *ksq_next;      /* Queue filled for the next round */
        int             ksq_load;      /* Total runnable */
};

/*
 * One kse queue per processor.
 */
struct kseq     kseq_cpu[MAXCPU];

static int sched_slice(struct ksegrp *kg);
static int sched_priority(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
void sched_check_runqs(void);
int sched_pickcpu(void);

static void
sched_setup(void *dummy)
{
        int i;

        mtx_lock_spin(&sched_lock);
        /* Initialize both runqs of every kseq. */
        for (i = 0; i < MAXCPU; i++) {
                kseq_cpu[i].ksq_load = 0;
                kseq_cpu[i].ksq_curr = &kseq_cpu[i].ksq_runqs[0];
                kseq_cpu[i].ksq_next = &kseq_cpu[i].ksq_runqs[1];
                runq_init(kseq_cpu[i].ksq_curr);
                runq_init(kseq_cpu[i].ksq_next);
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static int
sched_priority(struct ksegrp *kg)
{
        int pri;

        if (kg->kg_pri_class != PRI_TIMESHARE)
                return (kg->kg_user_pri);

        pri = SCHED_SLP_TOPRI(kg->kg_slptime);
        CTR2(KTR_RUNQ, "sched_priority: slptime: %d\tpri: %d",
            kg->kg_slptime, pri);

        pri += PRI_MIN_TIMESHARE;
        pri += kg->kg_nice;

        if (pri > PRI_MAX_TIMESHARE)
                pri = PRI_MAX_TIMESHARE;
        else if (pri < PRI_MIN_TIMESHARE)
                pri = PRI_MIN_TIMESHARE;

        kg->kg_user_pri = pri;

        return (kg->kg_user_pri);
}

/*
 * Calculate a time slice based on the process priority.
 */
static int
sched_slice(struct ksegrp *kg)
{
        int pslice;
        int sslice;
        int slice;
        int pri;

        pri = kg->kg_user_pri;
        pri -= PRI_MIN_TIMESHARE;
        pslice = SCHED_PRI_TOSLICE(pri);
        sslice = SCHED_SLP_TOSLICE(kg->kg_slptime);
        slice = SCHED_SLP_COMP(sslice) + SCHED_PRI_COMP(pslice);
        kg->kg_slptime = SCHED_SLP_DECAY(kg->kg_slptime);

        CTR4(KTR_RUNQ,
            "sched_slice: pri: %d\tsslice: %d\tpslice: %d\tslice: %d",
            pri, sslice, pslice, slice);

        if (slice < SCHED_SLICE_MIN)
                slice = SCHED_SLICE_MIN;
        else if (slice > SCHED_SLICE_MAX)
                slice = SCHED_SLICE_MAX;

        return (slice);
}

int
sched_rr_interval(void)
{
        return (SCHED_SLICE_MAX);
}
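/*
 * sched_pctcpu_update() rescales the accumulated tick count onto a fresh
 * window of SCHED_CPU_TICKS ticks: ke_ticks / (ke_ltick - ke_ftick) is the
 * average charge per tick of the old window, which is then spread across a
 * full-sized window before the first/last-tick watermarks are advanced to
 * [ticks - SCHED_CPU_TICKS, ticks].
 */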
void
sched_pctcpu_update(struct kse *ke)
{
        /*
         * Adjust counters and watermark for pctcpu calc.
         */
        ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
            SCHED_CPU_TICKS;
        ke->ke_ltick = ticks;
        ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

#ifdef SMP
int
sched_pickcpu(void)
{
        int cpu;
        int load;
        int i;

        if (!smp_started)
                return (0);

        cpu = PCPU_GET(cpuid);
        load = kseq_cpu[cpu].ksq_load;

        /* Prefer the least loaded cpu, starting with our own. */
        for (i = 0; i < mp_maxid; i++) {
                if (CPU_ABSENT(i))
                        continue;
                if (kseq_cpu[i].ksq_load < load) {
                        cpu = i;
                        load = kseq_cpu[i].ksq_load;
                }
        }

        CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
        return (cpu);
}
#else
int
sched_pickcpu(void)
{
        return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
        struct kse *ke;
        struct runq *rq;

        mtx_assert(&sched_lock, MA_OWNED);
        ke = td->td_kse;
        td->td_priority = prio;

        if (TD_ON_RUNQ(td)) {
                /* Requeue at the new priority. */
                rq = ke->ke_runq;

                runq_remove(rq, ke);
                runq_add(rq, ke);
        }
}

void
sched_switchout(struct thread *td)
{
        struct kse *ke;

        mtx_assert(&sched_lock, MA_OWNED);

        ke = td->td_kse;

        td->td_last_kse = ke;
        td->td_lastcpu = ke->ke_oncpu;
        ke->ke_flags &= ~KEF_NEEDRESCHED;

        if (TD_IS_RUNNING(td)) {
                setrunqueue(td);
                return;
        } else
                td->td_kse->ke_runq = NULL;

        /*
         * We will not be on the run queue, so we must be sleeping or
         * similar.
         */
        if (td->td_proc->p_flag & P_KSES)
                kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
        /* struct kse *ke = td->td_kse; */
        mtx_assert(&sched_lock, MA_OWNED);

        td->td_kse->ke_oncpu = PCPU_GET(cpuid);         /* XXX */
        if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
            td->td_priority != td->td_ksegrp->kg_user_pri)
                curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
        struct thread *td;

        kg->kg_nice = nice;
        sched_priority(kg);
        FOREACH_THREAD_IN_GROUP(kg, td) {
                td->td_kse->ke_flags |= KEF_NEEDRESCHED;
        }
}

void
sched_sleep(struct thread *td, u_char prio)
{
        mtx_assert(&sched_lock, MA_OWNED);

        td->td_slptime = ticks;
        td->td_priority = prio;

        /*
         * If this is an interactive task, clear its queue so it moves back
         * on to curr when it wakes up.  Otherwise let it stay on the queue
         * that it was assigned to.
         */
        if (SCHED_CURR(td->td_kse->ke_ksegrp))
                td->td_kse->ke_runq = NULL;
}

void
sched_wakeup(struct thread *td)
{
        struct ksegrp *kg;

        mtx_assert(&sched_lock, MA_OWNED);

        /*
         * Let the kseg know how long we slept for.  This is because process
         * interactivity behavior is modeled in the kseg.
         */
        kg = td->td_ksegrp;

        if (td->td_slptime) {
                kg->kg_slptime += ticks - td->td_slptime;
                if (kg->kg_slptime > SCHED_SLP_MAX)
                        kg->kg_slptime = SCHED_SLP_MAX;
                td->td_priority = sched_priority(kg);
        }
        td->td_slptime = 0;
        setrunqueue(td);
        if (td->td_priority < curthread->td_priority)
                curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}
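/*
 * A note on the interactivity model: kg_slptime is the single input.  It
 * grows in sched_wakeup() by the number of ticks slept (capped at
 * SCHED_SLP_MAX), is halved by SCHED_SLP_DECAY() whenever sched_slice()
 * grants a new slice, and is decremented once per tick of cpu use in
 * sched_clock().  SCHED_CURR() treats a kseg with more than hz / 4 accrued
 * ticks as interactive.
 */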
/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct ksegrp *kg, struct ksegrp *child)
{
        struct kse *ckse;
        struct kse *pkse;

        mtx_assert(&sched_lock, MA_OWNED);
        ckse = FIRST_KSE_IN_KSEGRP(child);
        pkse = FIRST_KSE_IN_KSEGRP(kg);

        /* XXX Need something better here */
        child->kg_slptime = kg->kg_slptime;
        child->kg_user_pri = kg->kg_user_pri;

        if (pkse->ke_oncpu != PCPU_GET(cpuid)) {
                printf("pkse->ke_oncpu = %d\n", pkse->ke_oncpu);
                printf("cpuid = %d", PCPU_GET(cpuid));
                Debugger("stop");
        }

        ckse->ke_slice = pkse->ke_slice;
        ckse->ke_oncpu = pkse->ke_oncpu;        /* sched_pickcpu(); */
        ckse->ke_runq = NULL;
        /*
         * Claim that we've been running for one second for statistical
         * purposes.
         */
        ckse->ke_ticks = 0;
        ckse->ke_ltick = ticks;
        ckse->ke_ftick = ticks - hz;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct ksegrp *kg, struct ksegrp *child)
{
        struct kseq *kseq;
        struct kse *ke;

        /* XXX Need something better here */
        mtx_assert(&sched_lock, MA_OWNED);
        kg->kg_slptime = child->kg_slptime;
        sched_priority(kg);

        /*
         * We drop the load here so that the running process leaves us with a
         * load of at least one.
         */
        ke = FIRST_KSE_IN_KSEGRP(kg);
        kseq = &kseq_cpu[ke->ke_oncpu];
}

int sched_clock_switches;

void
sched_clock(struct thread *td)
{
        struct kse *ke;
#if 0
        struct kse *nke;
#endif
        struct ksegrp *kg;
        struct kseq *kseq;
        int cpu;

        cpu = PCPU_GET(cpuid);
        kseq = &kseq_cpu[cpu];

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((td != NULL), ("schedclock: null thread pointer"));
        ke = td->td_kse;
        kg = td->td_ksegrp;

        ke->ke_ticks += 10000;
        ke->ke_ltick = ticks;
        /* Go up to one second beyond our max and then trim back down */
        if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
                sched_pctcpu_update(ke);

        if (td->td_kse->ke_flags & KEF_IDLEKSE) {
#if 0
                if (nke && nke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
                        printf("Idle running with %s on the runq!\n",
                            nke->ke_proc->p_comm);
                        Debugger("stop");
                }
#endif
                return;
        }
#if 0
        nke = runq_choose(kseq->ksq_curr);

        if (nke && nke->ke_thread &&
            nke->ke_thread->td_priority < td->td_priority) {
                sched_clock_switches++;
                ke->ke_flags |= KEF_NEEDRESCHED;
        }
#endif

        /*
         * We used a tick; decrease our total sleep time.  This decreases our
         * "interactivity".
         */
        if (kg->kg_slptime)
                kg->kg_slptime--;
        /*
         * We used up one time slice.
         */
        ke->ke_slice--;
        /*
         * We're out of time; recompute priorities and requeue.
         */
        if (ke->ke_slice == 0) {
                struct kseq *kseq;

                kseq = &kseq_cpu[ke->ke_oncpu];

                td->td_priority = sched_priority(kg);
                ke->ke_slice = sched_slice(kg);
                ke->ke_flags |= KEF_NEEDRESCHED;
                ke->ke_runq = NULL;
        }
}
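/*
 * When a slice expires, sched_clock() clears ke_runq, so the subsequent
 * sched_add() re-evaluates SCHED_CURR(): a kseg that is no longer
 * interactive is placed on ksq_next and will not run again until
 * sched_choose_kseq() swaps the queues.
 */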
void sched_print_load(void);

void
sched_print_load(void)
{
        int cpu;

        for (cpu = 0; cpu < mp_maxid; cpu++) {
                if (CPU_ABSENT(cpu))
                        continue;
                printf("%d: %d\n", cpu, kseq_cpu[cpu].ksq_load);
        }
}

int
sched_runnable(void)
{
        struct kseq *kseq;
        int cpu;

        cpu = PCPU_GET(cpuid);
        kseq = &kseq_cpu[cpu];

        if (runq_check(kseq->ksq_curr))
                return (1);

        if (runq_check(kseq->ksq_next))
                return (1);
#ifdef SMP
        /* Work on a remote kseq also makes us runnable; we may steal it. */
        if (smp_started) {
                int i;

                for (i = 0; i < mp_maxid; i++) {
                        if (CPU_ABSENT(i))
                                continue;
                        if (kseq_cpu[i].ksq_load && i != cpu)
                                return (1);
                }
        }
#endif
        return (0);
}

void
sched_userret(struct thread *td)
{
        struct ksegrp *kg;

        kg = td->td_ksegrp;

        if (td->td_priority != kg->kg_user_pri) {
                mtx_lock_spin(&sched_lock);
                td->td_priority = kg->kg_user_pri;
                mtx_unlock_spin(&sched_lock);
        }
}

void
sched_check_runqs(void)
{
        struct kseq *kseq;
        int cpu;

        for (cpu = 0; cpu < mp_maxid; cpu++) {
                if (CPU_ABSENT(cpu))
                        continue;
                kseq = &kseq_cpu[cpu];
                if (kseq->ksq_load !=
                    (runq_depth(kseq->ksq_curr) + runq_depth(kseq->ksq_next))) {
                        printf("CPU: %d\tload: %d\tcurr: %d\tnext: %d\n",
                            cpu, kseq->ksq_load, runq_depth(kseq->ksq_curr),
                            runq_depth(kseq->ksq_next));
                        Debugger("Imbalance");
                }
        }
}

struct kse *sched_choose_kseq(struct kseq *kseq);

struct kse *
sched_choose_kseq(struct kseq *kseq)
{
        struct kse *ke;
        struct runq *swap;

        /*
         * If the current queue is empty, swap it with next so that kses
         * queued for the following round become eligible to run.
         */
        if ((ke = runq_choose(kseq->ksq_curr)) == NULL) {
                swap = kseq->ksq_curr;
                kseq->ksq_curr = kseq->ksq_next;
                kseq->ksq_next = swap;
                ke = runq_choose(kseq->ksq_curr);
        }

        return (ke);
}
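/*
 * Selection happens in two steps: sched_choose() first consults the local
 * kseq via sched_choose_kseq(); if that comes up empty under SMP, it raids
 * the most loaded remote kseq for a single kse rather than letting this
 * cpu go idle.
 */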
struct kse *
sched_choose(void)
{
        struct kse *ke;
        int cpu;

        cpu = PCPU_GET(cpuid);
        ke = sched_choose_kseq(&kseq_cpu[cpu]);

        if (ke) {
                runq_remove(ke->ke_runq, ke);
                ke->ke_state = KES_THREAD;
#ifdef SMP
                kseq_cpu[cpu].ksq_load--;
#if 0
                sched_check_runqs();
#endif
#endif
        }

#ifdef SMP
        if (ke == NULL && smp_started) {
                int load;
                int me;
                int i;

                me = cpu;

                /*
                 * Find the cpu with the highest load and steal one proc.
                 */
                for (load = 0, i = 0; i < mp_maxid; i++) {
                        if (CPU_ABSENT(i) || i == me)
                                continue;
                        if (kseq_cpu[i].ksq_load > load) {
                                load = kseq_cpu[i].ksq_load;
                                cpu = i;
                        }
                }
                if (load) {
                        ke = sched_choose_kseq(&kseq_cpu[cpu]);
                        kseq_cpu[cpu].ksq_load--;
                        ke->ke_state = KES_THREAD;
                        runq_remove(ke->ke_runq, ke);
                        ke->ke_runq = NULL;
                        ke->ke_oncpu = me;
                }
        }
#endif
        return (ke);
}

void
sched_add(struct kse *ke)
{
        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
        KASSERT((ke->ke_thread->td_kse != NULL),
            ("runq_add: No KSE on thread"));
        KASSERT(ke->ke_state != KES_ONRUNQ,
            ("runq_add: kse %p (%s) already in run queue", ke,
            ke->ke_proc->p_comm));
        KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
            ("runq_add: process swapped out"));

        if (ke->ke_runq == NULL) {
                struct kseq *kseq;

                kseq = &kseq_cpu[ke->ke_oncpu];
                if (SCHED_CURR(ke->ke_ksegrp))
                        ke->ke_runq = kseq->ksq_curr;
                else
                        ke->ke_runq = kseq->ksq_next;
        }
        ke->ke_ksegrp->kg_runq_kses++;
        ke->ke_state = KES_ONRUNQ;

        runq_add(ke->ke_runq, ke);
#ifdef SMP
        kseq_cpu[ke->ke_oncpu].ksq_load++;
#if 0
        sched_check_runqs();
#endif
#endif
}

void
sched_rem(struct kse *ke)
{
        mtx_assert(&sched_lock, MA_OWNED);
        /* KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue")); */

        runq_remove(ke->ke_runq, ke);
        ke->ke_runq = NULL;
        ke->ke_state = KES_THREAD;
        ke->ke_ksegrp->kg_runq_kses--;
#ifdef SMP
        kseq_cpu[ke->ke_oncpu].ksq_load--;
#if 0
        sched_check_runqs();
#endif
#endif
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
        fixpt_t pctcpu;

        pctcpu = 0;

        if (ke->ke_ticks) {
                int rtick;

                /* Update to account for time potentially spent sleeping */
                ke->ke_ltick = ticks;
                sched_pctcpu_update(ke);

                /* How many rtick per second ? */
                rtick = ke->ke_ticks / (SCHED_CPU_TIME * 10000);
                pctcpu = (FSCALE * ((FSCALE * rtick) / stathz)) >> FSHIFT;
        }

        ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;

        return (pctcpu);
}

int
sched_sizeof_kse(void)
{
        return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
        return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
        return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
        return (sizeof(struct thread) + sizeof(struct td_sched));
}