/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
--- 22 unchanged lines hidden ---
 *
 * etymology:
 * ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 177435 2008-03-20 05:51:16Z jeff $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
--- 30 unchanged lines hidden ---

#define	KTR_ULE	0

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
	struct runq	*ts_runq;	/* Run-queue we're queued on. */
	short		ts_flags;	/* TSF_* flags. */
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	int		ts_rltick;	/* Real last tick, for affinity. */
	int		ts_slice;	/* Ticks of slice remaining. */
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
--- 154 unchanged lines hidden ---
static void	sched_priority(struct thread *);
static void	sched_thread_priority(struct thread *, u_char);
static int	sched_interact_score(struct thread *);
static void	sched_interact_update(struct thread *);
static void	sched_interact_fork(struct thread *);
static void	sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct thread *tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct thread *);
static void tdq_load_rem(struct tdq *, struct thread *);
static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
static __inline void tdq_runq_rem(struct tdq *, struct thread *);
static inline int sched_shouldpreempt(int, int, int);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static int tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct tdq *, struct thread *);
static struct thread *tdq_steal(struct tdq *, int);
static struct thread *runq_steal(struct runq *, int);
static int sched_pickcpu(struct thread *, int);
static void sched_balance(void);
static int sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct thread *, int, int);
static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
    NULL);

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(td, rqh, td_runq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    td, td->td_name, td->td_priority,
					    td->td_rqindex, pri);
				}
			}
	}
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
--- 55 unchanged lines hidden ---

#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
{
	struct td_sched *ts;
	u_char pri;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	pri = td->td_priority;
	ts = td->td_sched;
	TD_SET_RUNQ(td);
	if (THREAD_CAN_MIGRATE(td)) {
		tdq->tdq_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
	if (pri <= PRI_MAX_REALTIME) {
		ts->ts_runq = &tdq->tdq_realtime;
	} else if (pri <= PRI_MAX_TIMESHARE) {
		ts->ts_runq = &tdq->tdq_timeshare;
		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
		    ("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
--- 7 unchanged lines hidden ---
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, td, pri, flags);
		return;
	} else
		ts->ts_runq = &tdq->tdq_idle;
	runq_add(ts->ts_runq, td, flags);
}
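
/*
 * A minimal sketch of the band selection performed above, assuming only the
 * stock PRI_* cutoffs used in this file: realtime priorities go to
 * tdq_realtime, timeshare priorities to the circular tdq_timeshare queue
 * (indexed through tdq_idx/tdq_ridx as described in the comment above), and
 * everything else to tdq_idle.  The helper name is illustrative and not part
 * of the scheduler; the block is excluded from compilation.
 */
#if 0
static struct runq *
runq_band_sketch(struct tdq *tdq, u_char pri)
{

	if (pri <= PRI_MAX_REALTIME)
		return (&tdq->tdq_realtime);
	if (pri <= PRI_MAX_TIMESHARE)
		return (&tdq->tdq_timeshare);
	return (&tdq->tdq_idle);
}
#endif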

/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(ts->ts_runq != NULL,
	    ("tdq_runq_remove: thread %p null ts_runq", td));
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, td, NULL);
	} else
		runq_remove(ts->ts_runq, td);
}
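
/*
 * Note on the timeshare removal above: when tdq_idx has advanced past
 * tdq_ridx, a pointer to tdq_ridx is handed to runq_remove_idx() so the
 * rotating removal index can be advanced as the older bucket drains; when
 * the two indices already match, NULL is passed and the index is left
 * alone.
 */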

/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;
	int class;

	ts = td->td_sched;
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	class = PRI_BASE(td->td_pri_class);
	tdq->tdq_load++;
	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
	if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload++;
}
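
/*
 * Illustrative note on the two counters above: an interrupt thread, or any
 * thread whose process is flagged P_NOLOAD, still bumps tdq_load, which the
 * scheduler consults internally (e.g. when balancing or idling), but it is
 * left out of tdq_sysload, the figure reported for the system load average.
 */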

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct thread *td)
{
	struct td_sched *ts;
	int class;

	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	class = PRI_BASE(td->td_pri_class);
	if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload--;
	KASSERT(tdq->tdq_load != 0,
	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
	tdq->tdq_load--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
}

/*
 * Set lowpri to its exact value by searching the run-queue and
 * evaluating curthread.  curthread may be passed as an optimization.
 */
static void
tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if (ctd == NULL)
		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
	td = tdq_choose(tdq);
	if (td == NULL || td->td_priority > ctd->td_priority)
		tdq->tdq_lowpri = ctd->td_priority;
	else
		tdq->tdq_lowpri = td->td_priority;
}

#ifdef SMP
struct cpu_search {
	cpumask_t cs_mask;	/* Mask of valid cpus. */
--- 324 unchanged lines hidden ---
	struct tdq *tdq;
	int cpu;

	TDQ_LOCK_ASSERT(from, MA_OWNED);
	TDQ_LOCK_ASSERT(to, MA_OWNED);

	tdq = from;
	cpu = TDQ_ID(to);
	td = tdq_steal(tdq, cpu);
	if (td == NULL)
		return (0);
	ts = td->td_sched;
	/*
	 * Although the run queue is locked the thread may be blocked.  Lock
	 * it to clear this and acquire the run-queue lock.
	 */
	thread_lock(td);
	/* Drop recursive lock on from acquired via thread_lock(). */
	TDQ_UNLOCK(from);
	sched_rem(td);
--- 59 unchanged lines hidden ---
	spinlock_exit();
	return (1);
}

/*
 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
 */
static void
tdq_notify(struct tdq *tdq, struct thread *td)
{
	int cpri;
	int pri;
	int cpu;

	if (tdq->tdq_ipipending)
		return;
	cpu = td->td_sched->ts_cpu;
	pri = td->td_priority;
	cpri = pcpu_find(cpu)->pc_curthread->td_priority;
	if (!sched_shouldpreempt(pri, cpri, 1))
		return;
	tdq->tdq_ipipending = 1;
	ipi_selected(1 << cpu, IPI_PREEMPT);
}
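
/*
 * The mask handed to ipi_selected() above is a one-bit cpumask; notifying
 * CPU 2, for example, sends IPI_PREEMPT with the mask 1 << 2 == 0x4.
 * tdq_ipipending keeps a second notification from being posted while one is
 * still outstanding; it is expected to be cleared when the target CPU
 * services the preemption request (not shown in this excerpt).
 */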

/*
 * Steals load from a timeshare queue.  Honors the rotating queue head
 * index.
 */
static struct thread *
runq_steal_from(struct runq *rq, int cpu, u_char start)
{
	struct rqbits *rqb;
	struct rqhead *rqh;
	struct thread *td;
	int first;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW -1);
	pri = 0;
--- 7 unchanged lines hidden ---
				if (rqb->rqb_bits[i] & (1ul << pri))
					break;
			if (pri >= RQB_BPW)
				continue;
		} else
			pri = RQB_FFS(rqb->rqb_bits[i]);
		pri += (i << RQB_L2BPW);
		rqh = &rq->rq_queues[pri];
		TAILQ_FOREACH(td, rqh, td_runq) {
			if (first && THREAD_CAN_MIGRATE(td) &&
			    THREAD_CAN_SCHED(td, cpu))
				return (td);
			first = 1;
		}
	}
	if (start != 0) {
		start = 0;
		goto again;
	}

	return (NULL);
}

/*
 * Steals load from a standard linear queue.
 */
static struct thread *
runq_steal(struct runq *rq, int cpu)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct thread *td;
	int word;
	int bit;

	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(td, rqh, td_runq)
				if (THREAD_CAN_MIGRATE(td) &&
				    THREAD_CAN_SCHED(td, cpu))
					return (td);
		}
	}
	return (NULL);
}
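
/*
 * A minimal sketch of the status-bitmap walk used by the two steal routines
 * above, assuming only that RQB_BPW is the number of bits per status word
 * and RQB_L2BPW its base-2 log: scanning words from low to high, the first
 * set bit names the lowest-numbered (best priority) non-empty queue.  The
 * helper is illustrative and excluded from compilation.
 */
#if 0
static int
runq_lowest_set_sketch(struct rqbits *rqb)
{
	int word, bit;

	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++)
			if (rqb->rqb_bits[word] & (1ul << bit))
				return (bit + (word << RQB_L2BPW));
	}
	return (-1);
}
#endif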

/*
 * Attempt to steal a thread in priority order from a thread queue.
 */
static struct thread *
tdq_steal(struct tdq *tdq, int cpu)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
		return (td);
	if ((td = runq_steal_from(&tdq->tdq_timeshare,
	    cpu, tdq->tdq_ridx)) != NULL)
		return (td);
	return (runq_steal(&tdq->tdq_idle, cpu));
}

/*
 * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
 * current lock and returns with the assigned queue locked.
 */
static inline struct tdq *
sched_setcpu(struct thread *td, int cpu, int flags)
{

	struct tdq *tdq;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	tdq = TDQ_CPU(cpu);
	td->td_sched->ts_cpu = cpu;
	/*
	 * If the lock matches just return the queue.
	 */
	if (td->td_lock == TDQ_LOCKPTR(tdq))
		return (tdq);
#ifdef notyet
	/*
	 * If the thread isn't running its lockptr is a
	 * turnstile or a sleepqueue.  We can just lock_set without
	 * blocking.
	 */
--- 9 unchanged lines hidden ---
	 */
	thread_lock_block(td);
	TDQ_LOCK(tdq);
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
	return (tdq);
}

static int
sched_pickcpu(struct thread *td, int flags)
{
	struct cpu_group *cg;
	struct td_sched *ts;
	struct tdq *tdq;
	cpumask_t mask;
	int self;
	int pri;
	int cpu;

	self = PCPU_GET(cpuid);
	ts = td->td_sched;
	if (smp_started == 0)
		return (self);
	/*
	 * Don't migrate a running thread from sched_switch().
	 */
	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
		return (ts->ts_cpu);
	/*
--- 37 unchanged lines hidden ---
	KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
	return (cpu);
}
#endif

/*
 * Pick the highest priority task we have and return it.
 */
static struct thread *
tdq_choose(struct tdq *tdq)
{
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	td = runq_choose(&tdq->tdq_realtime);
	if (td != NULL)
		return (td);
	td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
	if (td != NULL) {
		KASSERT(td->td_priority >= PRI_MIN_TIMESHARE,
		    ("tdq_choose: Invalid priority on timeshare queue %d",
		    td->td_priority));
		return (td);
	}
	td = runq_choose(&tdq->tdq_idle);
	if (td != NULL) {
		KASSERT(td->td_priority >= PRI_MIN_IDLE,
		    ("tdq_choose: Invalid priority on idle queue %d",
		    td->td_priority));
		return (td);
	}

	return (NULL);
}

/*
 * Initialize a thread queue.
 */
--- 55 unchanged lines hidden ---
	 */
	realstathz = hz;
	sched_slice = (realstathz/10);	/* ~100ms */
	tickincr = 1 << SCHED_TICK_SHIFT;

	/* Add thread0's load since it's running. */
	TDQ_LOCK(tdq);
	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
	tdq_load_add(tdq, &thread0);
	tdq->tdq_lowpri = thread0.td_priority;
	TDQ_UNLOCK(tdq);
}
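
/*
 * Worked example for the defaults set just above (illustrative, assuming
 * hz = 1000): before stathz is known realstathz == hz, so sched_slice
 * starts out as 1000 / 10 = 100 ticks, matching the ~100ms noted in the
 * comment.  sched_initticks() below refines the tick increment once stathz
 * has been set up.
 */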

/*
 * This routine determines the tickincr after stathz and hz are setup.
 */
/* ARGSUSED */
--- 200 unchanged lines hidden ---

	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	td_sched0.ts_ltick = ticks;
	td_sched0.ts_ftick = ticks;
	td_sched0.ts_slice = sched_slice;
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most sched_slice stathz ticks.
 */
--- 211 unchanged lines hidden ---
 */
static struct mtx *
sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
{
	struct tdq *tdn;

	tdn = TDQ_CPU(td->td_sched->ts_cpu);
#ifdef SMP
	tdq_load_rem(tdq, td);
	/*
	 * Do the lock dance required to avoid LOR.  We grab an extra
	 * spinlock nesting to prevent preemption while we're
	 * not holding either run-queue lock.
	 */
	spinlock_enter();
	thread_block_switch(td);	/* This releases the lock on tdq. */
	TDQ_LOCK(tdn);
	tdq_add(tdn, td, flags);
	tdq_notify(tdn, td);
	/*
	 * After we unlock tdn the new cpu still can't switch into this
	 * thread until we've unblocked it in cpu_switch().  The lock
	 * pointers may match in the case of HTT cores.  Don't unlock here
	 * or we can deadlock when the other CPU runs the IPI handler.
	 */
	if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
		TDQ_UNLOCK(tdn);
--- 49 unchanged lines hidden ---
		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
		TD_SET_CAN_RUN(td);
	} else if (TD_IS_RUNNING(td)) {
		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
		srqflag = (flags & SW_PREEMPT) ?
		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
		    SRQ_OURSELF|SRQ_YIELDING;
		if (ts->ts_cpu == cpuid)
			tdq_runq_add(tdq, td, srqflag);
		else
			mtx = sched_switch_migrate(tdq, td, srqflag);
	} else {
		/* This thread must be going to sleep. */
		TDQ_LOCK(tdq);
		mtx = thread_block_switch(td);
		tdq_load_rem(tdq, td);
	}
	/*
	 * We enter here with the thread blocked and assigned to the
	 * appropriate cpu run-queue or sleep-queue and with the current
	 * thread-queue locked.
	 */
	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
	newtd = choosethread();
--- 129 unchanged lines hidden ---
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * Initialize child.
	 */
	ts = td->td_sched;
	ts2 = child->td_sched;
	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
	child->td_cpuset = cpuset_ref(td->td_cpuset);
	ts2->ts_cpu = ts->ts_cpu;
	ts2->ts_flags = 0;
	/*
	 * Grab our parent's cpu estimation information and priority.
	 */
	ts2->ts_ticks = ts->ts_ticks;
	ts2->ts_ltick = ts->ts_ltick;
	ts2->ts_ftick = ts->ts_ftick;
--- 208 unchanged lines hidden ---
/*
 * Choose the highest priority thread to run.  The thread is removed from
 * the run-queue while running however the load remains.  For SMP we set
 * the tdq in the global idle bitmask if it idles here.
 */
struct thread *
sched_choose(void)
{
	struct thread *td;
	struct tdq *tdq;

	tdq = TDQ_SELF();
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	td = tdq_choose(tdq);
	if (td) {
		td->td_sched->ts_ltick = ticks;
		tdq_runq_rem(tdq, td);
		return (td);
	}
	return (PCPU_GET(idlethread));
}

/*
 * Set owepreempt if necessary.  Preemption never happens directly in ULE,
 * we always request it once we exit a critical section.
 */
--- 21 unchanged lines hidden ---
/*
 * Add a thread to a thread queue.  Select the appropriate runq and add the
 * thread to it.  This is the internal function called when the tdq is
 * predetermined.
 */
void
tdq_add(struct tdq *tdq, struct thread *td, int flags)
{

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));

	if (td->td_priority < tdq->tdq_lowpri)
		tdq->tdq_lowpri = td->td_priority;
	tdq_runq_add(tdq, td, flags);
	tdq_load_add(tdq, td);
}

/*
 * Select the target thread queue and add a thread to it.  Request
 * preemption or IPI a remote processor if required.
 */
void
sched_add(struct thread *td, int flags)
{
	struct tdq *tdq;
#ifdef SMP
	int cpu;
#endif
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_name, td->td_priority, curthread,
	    curthread->td_name);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * Recalculate the priority before we select the target cpu or
	 * run-queue.
	 */
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_priority(td);
#ifdef SMP
	/*
	 * Pick the destination cpu and if it isn't ours transfer to the
	 * target cpu.
	 */
	cpu = sched_pickcpu(td, flags);
	tdq = sched_setcpu(td, cpu, flags);
	tdq_add(tdq, td, flags);
	if (cpu != PCPU_GET(cpuid)) {
		tdq_notify(tdq, td);
		return;
	}
#else
	tdq = TDQ_SELF();
	TDQ_LOCK(tdq);
	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
--- 9 unchanged lines hidden ---
 * Remove a thread from a run-queue without running it.  This is used
 * when we're stealing a thread from a remote queue.  Otherwise all threads
 * exit by calling sched_exit_thread() and sched_throw() themselves.
 */
void
sched_rem(struct thread *td)
{
	struct tdq *tdq;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_name, td->td_priority, curthread,
	    curthread->td_name);
	tdq = TDQ_CPU(td->td_sched->ts_cpu);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));
	tdq_runq_rem(tdq, td);
	tdq_load_rem(tdq, td);
	TD_SET_CAN_RUN(td);
	if (td->td_priority == tdq->tdq_lowpri)
		tdq_setlowpri(tdq, NULL);
}

/*
 * Fetch cpu utilization information.  Updates on demand.
 */
--- 43 unchanged lines hidden ---
	if (!THREAD_CAN_MIGRATE(td))
		return;
	/*
	 * Assign the new cpu and force a switch before returning to
	 * userspace.  If the target thread is not running locally send
	 * an ipi to force the issue.
	 */
	cpu = ts->ts_cpu;
	ts->ts_cpu = sched_pickcpu(td, 0);
	if (cpu != PCPU_GET(cpuid))
		ipi_selected(1 << cpu, IPI_PREEMPT);
#endif
}

/*
 * Bind a thread to a target cpu.
 */
--- 115 unchanged lines hidden ---

	tdq = TDQ_SELF();
	if (td == NULL) {
		/* Correct spinlock nesting and acquire the correct lock. */
		TDQ_LOCK(tdq);
		spinlock_exit();
	} else {
		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
		tdq_load_rem(tdq, td);
		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
	}
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
	newtd = choosethread();
	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
	PCPU_SET(switchtime, cpu_ticks());
	PCPU_SET(switchticks, ticks);
	cpu_throw(td, newtd);		/* doesn't return */
--- 21 unchanged lines hidden ---
	td->td_lock = TDQ_LOCKPTR(tdq);
	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
	td->td_oncpu = cpuid;
	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
	lock_profile_obtain_lock_success(
	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
}

SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
    "Scheduler name");
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
    "Slice size for timeshare threads");
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
    "Interactivity score threshold");
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
    0, "Min priority for preemption, lower priorities have greater precedence");
--- 13 unchanged lines hidden ---
    "Attempts to steal work from other cores before idling");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
    "Minimum load on remote cpu before we'll steal");
#endif

/* ps compat.  All cpu percentages from ULE are weighted. */
static int ccpu = 0;
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
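
/*
 * The knobs above live under the kern.sched sysctl tree.  A minimal userland
 * sketch for reading a few of them with sysctlbyname(3); the program is
 * illustrative only and excluded from compilation here:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char name[32];
	int slice, interact;
	size_t len;

	len = sizeof(name);
	if (sysctlbyname("kern.sched.name", name, &len, NULL, 0) == 0)
		printf("scheduler: %s\n", name);
	len = sizeof(slice);
	if (sysctlbyname("kern.sched.slice", &slice, &len, NULL, 0) == 0)
		printf("slice: %d ticks\n", slice);
	len = sizeof(interact);
	if (sysctlbyname("kern.sched.interact", &interact, &len, NULL, 0) == 0)
		printf("interactivity threshold: %d\n", interact);
	return (0);
}
#endif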