/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
--- 11 unchanged lines hidden (view full) ---
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121896 2003-11-02 10:56:48Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
--- 167 unchanged lines hidden (view full) ---

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int		ksq_load_transferable;	/* kses that may be migrated. */
	unsigned int	ksq_rslices;		/* Slices on run queue */
	int		ksq_cpus;		/* Count of CPUs in this kseq. */
	struct kse	*ksq_assigned;		/* KSEs assigned by another CPU. */
#endif
};

/*
 * One kse queue per processor.
--- 32 unchanged lines hidden (view full) ---
static struct kse *runq_steal(struct runq *rq);
static struct kseq *kseq_load_highest(void);
static void kseq_balance(void *arg);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_find(void);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq);
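/*
 * Any KSE that does not belong to the interrupt class may be migrated
 * to another CPU.
 */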
#define	KSE_CAN_MIGRATE(ke, class)	((class) != PRI_ITHD)
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
279 printf("\tload REALTIME: %d\n", kseq->ksq_load_timeshare); 280#ifdef SMP 281 printf("\tload transferable: %d\n", kseq->ksq_load_transferable); 282#endif |
283 printf("\tnicemin:\t%d\n", kseq->ksq_nicemin); 284 printf("\tnice counts:\n"); 285 for (i = 0; i < SCHED_PRI_NRESV; i++) 286 if (kseq->ksq_nice[i]) 287 printf("\t\t%d = %d\n", 288 i - SCHED_PRI_NHALF, kseq->ksq_nice[i]); 289} 290 291static void 292kseq_add(struct kseq *kseq, struct kse *ke) 293{ |
294 int class; |
295 mtx_assert(&sched_lock, MA_OWNED); |
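	/*
	 * Account for this KSE in the per-class counters: the timeshare
	 * load feeds the slice and nice calculations, and on SMP the
	 * transferable count tracks KSEs that may be migrated.
	 */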
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, class))
		kseq->ksq_load_transferable++;
	kseq->ksq_rslices += ke->ke_slice;
#endif
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
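	/* Undo the per-class accounting performed in kseq_add(). */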
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, class))
		kseq->ksq_load_transferable--;
	kseq->ksq_rslices -= ke->ke_slice;
#endif
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
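	/*
	 * If this is the nicest value seen so far, or the only timeshare
	 * KSE on the queue, it becomes the new minimum.
	 */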
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

--- 5 unchanged lines hidden (view full) ---

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return. Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}
--- 47 unchanged lines hidden (view full) ---
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	kseq = KSEQ_CPU(high_cpu);

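	/*
	 * Only the transferable load matters here; KSEs that may not
	 * migrate (interrupt class) are never moved by the balancer.
	 */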
	high_load = kseq->ksq_load_transferable;
	/*
	 * Nothing to do.
	 */
	if (high_load < kseq->ksq_cpus + 1)
		goto out;

	high_load -= kseq->ksq_cpus;

--- 33 unchanged lines hidden (view full) ---
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	kseq = KSEQ_CPU(cpu);

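	/*
	 * The busiest kseq is only worth stealing from if it has more
	 * transferable KSEs than it has CPUs.
	 */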
	if (kseq->ksq_load_transferable > kseq->ksq_cpus)
		return (kseq);

	return (NULL);
}

static void
kseq_move(struct kseq *from, int cpu)
{
--- 94 unchanged lines hidden (view full) ---
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1 << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke,
				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
--- 54 unchanged lines hidden (view full) ---
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
#ifdef SMP
	kseq->ksq_load_transferable = 0;
	kseq->ksq_rslices = 0;
	kseq->ksq_assigned = NULL;
#endif
}

static void
sched_setup(void *dummy)
{
--- 103 unchanged lines hidden (view full) ---
	 * run queue. The exception to this is nice 0 ksegs when
	 * a nice -20 is running. They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
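		/*
		 * The slice shrinks as this kseg's nice value moves away
		 * from the least nice value on the queue; past
		 * SCHED_SLICE_NTHRESH only nice 0 ksegs still receive a
		 * minimum slice.
		 */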
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept. It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
--- 320 unchanged lines hidden (view full) ---
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
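	/*
	 * Adjust the per-kseq class accounting for each KSE in the group
	 * that is currently on a run queue or in the KES_THREAD state.
	 */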
	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		if (KSE_CAN_MIGRATE(ke, oclass))
			kseq->ksq_load_transferable--;
		if (KSE_CAN_MIGRATE(ke, nclass))
			kseq->ksq_load_transferable++;
#endif
		if (oclass == PRI_TIMESHARE)
			kseq->ksq_load_timeshare--;
		if (nclass == PRI_TIMESHARE)
			kseq->ksq_load_timeshare++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
--- 251 unchanged lines hidden (view full) ---
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * If there are any idle processors, give them our extra load.
	 */
	if (kseq_idle && class != PRI_ITHD &&
	    kseq->ksq_load_transferable >= kseq->ksq_cpus) {
		int cpu;

		/*
		 * Multiple cpus could find this bit simultaneously but the
		 * race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu) {
--- 102 unchanged lines hidden ---