sched_ule.c (177426) → sched_ule.c (177435)
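At a glance, the bulk of this revision is a mechanical interface change: the per-CPU queue helpers now take the thread itself rather than its scheduler-private td_sched, and the run-queue linkage fields (ts_procq, ts_thread, ts_rqindex) leave struct td_sched in favor of td_runq/td_rqindex on struct thread. A representative pair of declarations, copied from the prototype hunk further down (this note is a summary, not part of the diff):

/* r177426: queue operations took the scheduler-private struct. */
static struct td_sched * tdq_choose(struct tdq *);
static void tdq_load_add(struct tdq *, struct td_sched *);

/* r177435: they take the thread; td_sched is reached via td->td_sched. */
static struct thread *tdq_choose(struct tdq *);
static void tdq_load_add(struct tdq *, struct thread *);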
1/*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 22 unchanged lines hidden (view full) ---

31 *
32 * etymology:
33 * ULE is the last three letters in schedule. It owes its name to a
34 * generic user created for a scheduling system by Paul Mikesell at
35 * Isilon Systems and a general lack of creativity on the part of the author.
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 177426 2008-03-20 03:06:33Z jeff $");
39__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 177435 2008-03-20 05:51:16Z jeff $");
40
41#include "opt_hwpmc_hooks.h"
42#include "opt_sched.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/kdb.h>
47#include <sys/kernel.h>

--- 30 unchanged lines hidden (view full) ---

78
79#define KTR_ULE 0
80
81/*
82 * Thread scheduler specific section. All fields are protected
83 * by the thread lock.
84 */
85struct td_sched {
86 TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */
87 struct thread *ts_thread; /* Active associated thread. */
88 struct runq *ts_runq; /* Run-queue we're queued on. */
89 short ts_flags; /* TSF_* flags. */
86 struct runq *ts_runq; /* Run-queue we're queued on. */
87 short ts_flags; /* TSF_* flags. */
90 u_char ts_rqindex; /* Run queue index. */
91 u_char ts_cpu; /* CPU that we have affinity for. */
92 int ts_rltick; /* Real last tick, for affinity. */
93 int ts_slice; /* Ticks of slice remaining. */
94 u_int ts_slptime; /* Number of ticks we vol. slept */
95 u_int ts_runtime; /* Number of ticks we were running */
96 int ts_ltick; /* Last tick that we were running on */
97 int ts_ftick; /* First tick that we were running on */
98 int ts_ticks; /* Tick count */

--- 154 unchanged lines hidden (view full) ---

253static void sched_priority(struct thread *);
254static void sched_thread_priority(struct thread *, u_char);
255static int sched_interact_score(struct thread *);
256static void sched_interact_update(struct thread *);
257static void sched_interact_fork(struct thread *);
258static void sched_pctcpu_update(struct td_sched *);
259
260/* Operations on per processor queues */
88 u_char ts_cpu; /* CPU that we have affinity for. */
89 int ts_rltick; /* Real last tick, for affinity. */
90 int ts_slice; /* Ticks of slice remaining. */
91 u_int ts_slptime; /* Number of ticks we vol. slept */
92 u_int ts_runtime; /* Number of ticks we were running */
93 int ts_ltick; /* Last tick that we were running on */
94 int ts_ftick; /* First tick that we were running on */
95 int ts_ticks; /* Tick count */
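The hunk above slims struct td_sched: the queue linkage (ts_procq), the back-pointer (ts_thread) and the queue index (ts_rqindex) are removed, and, as runq_print() below shows, the generic run-queue code now links threads directly through td_runq and td_rqindex. A minimal sketch of the resulting split, using simplified stand-ins for the kernel types (field names come from this diff; the includes and everything omitted are illustrative):

#include <sys/types.h>          /* u_char */
#include <sys/queue.h>          /* TAILQ_ENTRY() */

struct runq;                    /* per-priority queue array, opaque here */

struct td_sched {               /* scheduler-private per-thread state (r177435 shape) */
        struct runq *ts_runq;   /* run-queue we're queued on */
        short ts_flags;         /* TSF_* flags */
        u_char ts_cpu;          /* CPU we have affinity for */
        /* ... slice, sleep/run time, tick counters ... */
};

struct thread {                 /* generic thread, as seen by the runq code */
        TAILQ_ENTRY(thread) td_runq;    /* replaces td_sched->ts_procq */
        u_char td_rqindex;              /* replaces td_sched->ts_rqindex */
        u_char td_priority;
        struct td_sched *td_sched;      /* ts_thread back-pointer is no longer needed */
};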

--- 154 unchanged lines hidden (view full) ---

250static void sched_priority(struct thread *);
251static void sched_thread_priority(struct thread *, u_char);
252static int sched_interact_score(struct thread *);
253static void sched_interact_update(struct thread *);
254static void sched_interact_fork(struct thread *);
255static void sched_pctcpu_update(struct td_sched *);
256
257/* Operations on per processor queues */
261static struct td_sched * tdq_choose(struct tdq *);
258static struct thread *tdq_choose(struct tdq *);
262static void tdq_setup(struct tdq *);
259static void tdq_setup(struct tdq *);
263static void tdq_load_add(struct tdq *, struct td_sched *);
264static void tdq_load_rem(struct tdq *, struct td_sched *);
265static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
266static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
260static void tdq_load_add(struct tdq *, struct thread *);
261static void tdq_load_rem(struct tdq *, struct thread *);
262static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
263static __inline void tdq_runq_rem(struct tdq *, struct thread *);
267static inline int sched_shouldpreempt(int, int, int);
268void tdq_print(int cpu);
269static void runq_print(struct runq *rq);
270static void tdq_add(struct tdq *, struct thread *, int);
271#ifdef SMP
272static int tdq_move(struct tdq *, struct tdq *);
273static int tdq_idled(struct tdq *);
264static inline int sched_shouldpreempt(int, int, int);
265void tdq_print(int cpu);
266static void runq_print(struct runq *rq);
267static void tdq_add(struct tdq *, struct thread *, int);
268#ifdef SMP
269static int tdq_move(struct tdq *, struct tdq *);
270static int tdq_idled(struct tdq *);
274static void tdq_notify(struct tdq *, struct td_sched *);
275static struct td_sched *tdq_steal(struct tdq *, int);
276static struct td_sched *runq_steal(struct runq *, int);
277static int sched_pickcpu(struct td_sched *, int);
271static void tdq_notify(struct tdq *, struct thread *);
272static struct thread *tdq_steal(struct tdq *, int);
273static struct thread *runq_steal(struct runq *, int);
274static int sched_pickcpu(struct thread *, int);
278static void sched_balance(void);
279static int sched_balance_pair(struct tdq *, struct tdq *);
275static void sched_balance(void);
276static int sched_balance_pair(struct tdq *, struct tdq *);
280static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
277static inline struct tdq *sched_setcpu(struct thread *, int, int);
281static inline struct mtx *thread_block_switch(struct thread *);
282static inline void thread_unblock_switch(struct thread *, struct mtx *);
283static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
284#endif
285
286static void sched_setup(void *dummy);
287SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
288
289static void sched_initticks(void *dummy);
290SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
291 NULL);
292
293/*
294 * Print the threads waiting on a run-queue.
295 */
296static void
297runq_print(struct runq *rq)
298{
299 struct rqhead *rqh;
278static inline struct mtx *thread_block_switch(struct thread *);
279static inline void thread_unblock_switch(struct thread *, struct mtx *);
280static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
281#endif
282
283static void sched_setup(void *dummy);
284SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
285
286static void sched_initticks(void *dummy);
287SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
288 NULL);
289
290/*
291 * Print the threads waiting on a run-queue.
292 */
293static void
294runq_print(struct runq *rq)
295{
296 struct rqhead *rqh;
300 struct td_sched *ts;
297 struct thread *td;
301 int pri;
302 int j;
303 int i;
304
305 for (i = 0; i < RQB_LEN; i++) {
306 printf("\t\trunq bits %d 0x%zx\n",
307 i, rq->rq_status.rqb_bits[i]);
308 for (j = 0; j < RQB_BPW; j++)
309 if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
310 pri = j + (i << RQB_L2BPW);
311 rqh = &rq->rq_queues[pri];
298 int pri;
299 int j;
300 int i;
301
302 for (i = 0; i < RQB_LEN; i++) {
303 printf("\t\trunq bits %d 0x%zx\n",
304 i, rq->rq_status.rqb_bits[i]);
305 for (j = 0; j < RQB_BPW; j++)
306 if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
307 pri = j + (i << RQB_L2BPW);
308 rqh = &rq->rq_queues[pri];
312 TAILQ_FOREACH(ts, rqh, ts_procq) {
309 TAILQ_FOREACH(td, rqh, td_runq) {
313 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
310 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
314 ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
311 td, td->td_name, td->td_priority,
312 td->td_rqindex, pri);
315 }
316 }
317 }
318}
319
320/*
321 * Print the status of a per-cpu thread queue. Should be a ddb show cmd.
322 */

--- 55 unchanged lines hidden (view full) ---

378
379#define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
380/*
381 * Add a thread to the actual run-queue. Keeps transferable counts up to
382 * date with what is actually on the run-queue. Selects the correct
383 * queue position for timeshare threads.
384 */
385static __inline void
313 }
314 }
315 }
316}
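runq_print() recovers a queue index from the status bitmap as pri = j + (i << RQB_L2BPW), i.e. the word index scaled by the word size plus the bit position within that word. A standalone illustration of the mapping, assuming 64-bit status words (the RQB_* values below are assumptions for the example, not taken from the real header):

#include <stdio.h>

#define RQB_BPW         64              /* assumed bits per rqb_bits[] word */
#define RQB_L2BPW       6               /* log2(RQB_BPW) */

int
main(void)
{
        int pri = 83;                           /* arbitrary example queue index */
        int word = pri >> RQB_L2BPW;            /* which rqb_bits[] word */
        int bit = pri & (RQB_BPW - 1);          /* which bit inside that word */

        printf("pri %d -> word %d, bit %d\n", pri, word, bit);
        printf("recovered pri: %d\n", bit + (word << RQB_L2BPW));
        return (0);
}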
317
318/*
319 * Print the status of a per-cpu thread queue. Should be a ddb show cmd.
320 */

--- 55 unchanged lines hidden (view full) ---

376
377#define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
378/*
379 * Add a thread to the actual run-queue. Keeps transferable counts up to
380 * date with what is actually on the run-queue. Selects the correct
381 * queue position for timeshare threads.
382 */
383static __inline void
386tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
384tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
387{
385{
386 struct td_sched *ts;
388 u_char pri;
389
390 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
387 u_char pri;
388
389 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
391 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
390 THREAD_LOCK_ASSERT(td, MA_OWNED);
392
391
393 TD_SET_RUNQ(ts->ts_thread);
394 if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
392 pri = td->td_priority;
393 ts = td->td_sched;
394 TD_SET_RUNQ(td);
395 if (THREAD_CAN_MIGRATE(td)) {
395 tdq->tdq_transferable++;
396 ts->ts_flags |= TSF_XFERABLE;
397 }
396 tdq->tdq_transferable++;
397 ts->ts_flags |= TSF_XFERABLE;
398 }
398 pri = ts->ts_thread->td_priority;
399 if (pri <= PRI_MAX_REALTIME) {
400 ts->ts_runq = &tdq->tdq_realtime;
401 } else if (pri <= PRI_MAX_TIMESHARE) {
402 ts->ts_runq = &tdq->tdq_timeshare;
403 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
404 ("Invalid priority %d on timeshare runq", pri));
405 /*
406 * This queue contains only priorities between MIN and MAX

--- 7 unchanged lines hidden (view full) ---

414 * can have a one slot difference between idx and
415 * ridx while we wait for threads to drain.
416 */
417 if (tdq->tdq_ridx != tdq->tdq_idx &&
418 pri == tdq->tdq_ridx)
419 pri = (unsigned char)(pri - 1) % RQ_NQS;
420 } else
421 pri = tdq->tdq_ridx;
399 if (pri <= PRI_MAX_REALTIME) {
400 ts->ts_runq = &tdq->tdq_realtime;
401 } else if (pri <= PRI_MAX_TIMESHARE) {
402 ts->ts_runq = &tdq->tdq_timeshare;
403 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
404 ("Invalid priority %d on timeshare runq", pri));
405 /*
406 * This queue contains only priorities between MIN and MAX

--- 7 unchanged lines hidden (view full) ---

414 * can have a one slot difference between idx and
415 * ridx while we wait for threads to drain.
416 */
417 if (tdq->tdq_ridx != tdq->tdq_idx &&
418 pri == tdq->tdq_ridx)
419 pri = (unsigned char)(pri - 1) % RQ_NQS;
420 } else
421 pri = tdq->tdq_ridx;
422 runq_add_pri(ts->ts_runq, ts, pri, flags);
422 runq_add_pri(ts->ts_runq, td, pri, flags);
423 return;
424 } else
425 ts->ts_runq = &tdq->tdq_idle;
423 return;
424 } else
425 ts->ts_runq = &tdq->tdq_idle;
426 runq_add(ts->ts_runq, ts, flags);
426 runq_add(ts->ts_runq, td, flags);
427}
428
429/*
430 * Remove a thread from a run-queue. This typically happens when a thread
431 * is selected to run. Running threads are not on the queue and the
432 * transferable count does not reflect them.
433 */
434static __inline void
427}
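tdq_runq_add() above steers a thread by priority into one of three sub-queues: realtime, the timeshare calendar (whose insertion head rotates through tdq_idx/tdq_ridx), or idle. The exact timeshare index computation is in the lines hidden above; the sketch below only illustrates the rotating-bucket idea, reusing the TS_RQ_PPQ scaling defined earlier with assumed values for the range constants:

/* Assumed values for illustration; the real ones live in <sys/priority.h> and <sys/runq.h>. */
#define RQ_NQS                  64
#define PRI_MIN_TIMESHARE       160
#define PRI_MAX_TIMESHARE       223
#define TS_RQ_PPQ       (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)

/*
 * Sketch: map a timeshare priority to a calendar bucket relative to the
 * current insertion head.  Better (numerically lower) priorities land in
 * buckets closer to the head, so they come up sooner as the head rotates.
 */
static int
timeshare_bucket(int pri, int tdq_idx)
{
        int off;

        off = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;    /* 0 .. RQ_NQS - 1 */
        return ((tdq_idx + off) % RQ_NQS);              /* rotate with the head */
}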
428
429/*
430 * Remove a thread from a run-queue. This typically happens when a thread
431 * is selected to run. Running threads are not on the queue and the
432 * transferable count does not reflect them.
433 */
434static __inline void
435tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
435tdq_runq_rem(struct tdq *tdq, struct thread *td)
436{
436{
437 struct td_sched *ts;
438
439 ts = td->td_sched;
437 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
438 KASSERT(ts->ts_runq != NULL,
440 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
441 KASSERT(ts->ts_runq != NULL,
439 ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
442 ("tdq_runq_remove: thread %p null ts_runq", td));
440 if (ts->ts_flags & TSF_XFERABLE) {
441 tdq->tdq_transferable--;
442 ts->ts_flags &= ~TSF_XFERABLE;
443 }
444 if (ts->ts_runq == &tdq->tdq_timeshare) {
445 if (tdq->tdq_idx != tdq->tdq_ridx)
443 if (ts->ts_flags & TSF_XFERABLE) {
444 tdq->tdq_transferable--;
445 ts->ts_flags &= ~TSF_XFERABLE;
446 }
447 if (ts->ts_runq == &tdq->tdq_timeshare) {
448 if (tdq->tdq_idx != tdq->tdq_ridx)
446 runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
449 runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
447 else
450 else
448 runq_remove_idx(ts->ts_runq, ts, NULL);
451 runq_remove_idx(ts->ts_runq, td, NULL);
449 } else
452 } else
450 runq_remove(ts->ts_runq, ts);
453 runq_remove(ts->ts_runq, td);
451}
452
453/*
454 * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load
455 * for this thread to the referenced thread queue.
456 */
457static void
454}
455
456/*
457 * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load
458 * for this thread to the referenced thread queue.
459 */
460static void
458tdq_load_add(struct tdq *tdq, struct td_sched *ts)
461tdq_load_add(struct tdq *tdq, struct thread *td)
459{
462{
463 struct td_sched *ts;
460 int class;
461
464 int class;
465
466 ts = td->td_sched;
462 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
467 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
463 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
464 class = PRI_BASE(ts->ts_thread->td_pri_class);
468 THREAD_LOCK_ASSERT(td, MA_OWNED);
469 class = PRI_BASE(td->td_pri_class);
465 tdq->tdq_load++;
466 CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
470 tdq->tdq_load++;
471 CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
467 if (class != PRI_ITHD &&
468 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
472 if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
469 tdq->tdq_sysload++;
470}
471
472/*
473 * Remove the load from a thread that is transitioning to a sleep state or
474 * exiting.
475 */
476static void
473 tdq->tdq_sysload++;
474}
475
476/*
477 * Remove the load from a thread that is transitioning to a sleep state or
478 * exiting.
479 */
480static void
477tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
481tdq_load_rem(struct tdq *tdq, struct thread *td)
478{
482{
483 struct td_sched *ts;
479 int class;
480
484 int class;
485
481 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
486 ts = td->td_sched;
487 THREAD_LOCK_ASSERT(td, MA_OWNED);
482 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
488 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
483 class = PRI_BASE(ts->ts_thread->td_pri_class);
484 if (class != PRI_ITHD &&
485 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
489 class = PRI_BASE(td->td_pri_class);
490 if (class != PRI_ITHD && (td->td_proc->p_flag & P_NOLOAD) == 0)
486 tdq->tdq_sysload--;
487 KASSERT(tdq->tdq_load != 0,
488 ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
489 tdq->tdq_load--;
490 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
491}
492
493/*
494 * Set lowpri to its exact value by searching the run-queue and
495 * evaluating curthread. curthread may be passed as an optimization.
496 */
497static void
498tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
499{
491 tdq->tdq_sysload--;
492 KASSERT(tdq->tdq_load != 0,
493 ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
494 tdq->tdq_load--;
495 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
496}
497
498/*
499 * Set lowpri to its exact value by searching the run-queue and
500 * evaluating curthread. curthread may be passed as an optimization.
501 */
502static void
503tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
504{
500 struct td_sched *ts;
501 struct thread *td;
502
503 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
504 if (ctd == NULL)
505 ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
505 struct thread *td;
506
507 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
508 if (ctd == NULL)
509 ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
506 ts = tdq_choose(tdq);
507 if (ts)
508 td = ts->ts_thread;
509 if (ts == NULL || td->td_priority > ctd->td_priority)
510 td = tdq_choose(tdq);
511 if (td == NULL || td->td_priority > ctd->td_priority)
510 tdq->tdq_lowpri = ctd->td_priority;
511 else
512 tdq->tdq_lowpri = td->td_priority;
513}
514
515#ifdef SMP
516struct cpu_search {
517 cpumask_t cs_mask; /* Mask of valid cpus. */

--- 324 unchanged lines hidden (view full) ---

842 struct tdq *tdq;
843 int cpu;
844
845 TDQ_LOCK_ASSERT(from, MA_OWNED);
846 TDQ_LOCK_ASSERT(to, MA_OWNED);
847
848 tdq = from;
849 cpu = TDQ_ID(to);
512 tdq->tdq_lowpri = ctd->td_priority;
513 else
514 tdq->tdq_lowpri = td->td_priority;
515}
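A reminder that helps when reading tdq_setlowpri() above: FreeBSD priorities are inverted, so a numerically smaller td_priority is the more important thread, and tdq_lowpri ends up caching the smaller of the chosen thread's and curthread's values. The comparison restated as a sketch (not the kernel code):

/* Pick the better (numerically lower) of the queued head and curthread. */
static unsigned char
lowpri_of(unsigned char queued_pri, unsigned char curthread_pri, int queue_empty)
{
        if (queue_empty || queued_pri > curthread_pri)
                return (curthread_pri);
        return (queued_pri);
}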
516
517#ifdef SMP
518struct cpu_search {
519 cpumask_t cs_mask; /* Mask of valid cpus. */

--- 324 unchanged lines hidden (view full) ---

844 struct tdq *tdq;
845 int cpu;
846
847 TDQ_LOCK_ASSERT(from, MA_OWNED);
848 TDQ_LOCK_ASSERT(to, MA_OWNED);
849
850 tdq = from;
851 cpu = TDQ_ID(to);
850 ts = tdq_steal(tdq, cpu);
851 if (ts == NULL)
852 td = tdq_steal(tdq, cpu);
853 if (td == NULL)
852 return (0);
854 return (0);
853 td = ts->ts_thread;
855 ts = td->td_sched;
854 /*
855 * Although the run queue is locked the thread may be blocked. Lock
856 * it to clear this and acquire the run-queue lock.
857 */
858 thread_lock(td);
859 /* Drop recursive lock on from acquired via thread_lock(). */
860 TDQ_UNLOCK(from);
861 sched_rem(td);

--- 59 unchanged lines hidden (view full) ---

921 spinlock_exit();
922 return (1);
923}
924
925/*
926 * Notify a remote cpu of new work. Sends an IPI if criteria are met.
927 */
928static void
856 /*
857 * Although the run queue is locked the thread may be blocked. Lock
858 * it to clear this and acquire the run-queue lock.
859 */
860 thread_lock(td);
861 /* Drop recursive lock on from acquired via thread_lock(). */
862 TDQ_UNLOCK(from);
863 sched_rem(td);

--- 59 unchanged lines hidden (view full) ---

923 spinlock_exit();
924 return (1);
925}
926
927/*
928 * Notify a remote cpu of new work. Sends an IPI if criteria are met.
929 */
930static void
929tdq_notify(struct tdq *tdq, struct td_sched *ts)
931tdq_notify(struct tdq *tdq, struct thread *td)
930{
931 int cpri;
932 int pri;
933 int cpu;
934
935 if (tdq->tdq_ipipending)
936 return;
932{
933 int cpri;
934 int pri;
935 int cpu;
936
937 if (tdq->tdq_ipipending)
938 return;
937 cpu = ts->ts_cpu;
938 pri = ts->ts_thread->td_priority;
939 cpu = td->td_sched->ts_cpu;
940 pri = td->td_priority;
939 cpri = pcpu_find(cpu)->pc_curthread->td_priority;
940 if (!sched_shouldpreempt(pri, cpri, 1))
941 return;
942 tdq->tdq_ipipending = 1;
943 ipi_selected(1 << cpu, IPI_PREEMPT);
944}
945
946/*
947 * Steals load from a timeshare queue. Honors the rotating queue head
948 * index.
949 */
941 cpri = pcpu_find(cpu)->pc_curthread->td_priority;
942 if (!sched_shouldpreempt(pri, cpri, 1))
943 return;
944 tdq->tdq_ipipending = 1;
945 ipi_selected(1 << cpu, IPI_PREEMPT);
946}
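tdq_notify() above throttles cross-CPU wakeups: nothing is sent while an IPI is still pending, and sched_shouldpreempt() (defined elsewhere in this file, not shown in this view; its check also involves the preempt_thresh knob exported near the end) decides whether the queued thread beats what the remote CPU is currently running. A userland-style model of that gate, with a deliberately naive stub standing in for the real policy check:

#include <stdbool.h>

struct tdq_model {
        int tdq_ipipending;             /* a preemption IPI is already in flight */
};

/* Naive stand-in for sched_shouldpreempt(); the real policy is more involved. */
static bool
should_preempt(int newpri, int remote_curpri)
{
        return (newpri < remote_curpri);        /* lower value == more important */
}

/* Mirror of the visible tdq_notify() flow: at most one IPI outstanding per queue. */
static bool
notify_remote(struct tdq_model *tdq, int newpri, int remote_curpri)
{
        if (tdq->tdq_ipipending)
                return (false);
        if (!should_preempt(newpri, remote_curpri))
                return (false);
        tdq->tdq_ipipending = 1;        /* cleared later by the IPI handler (not modeled) */
        return (true);                  /* caller would ipi_selected(1 << cpu, IPI_PREEMPT) */
}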
947
948/*
949 * Steals load from a timeshare queue. Honors the rotating queue head
950 * index.
951 */
950static struct td_sched *
952static struct thread *
951runq_steal_from(struct runq *rq, int cpu, u_char start)
952{
953runq_steal_from(struct runq *rq, int cpu, u_char start)
954{
953 struct td_sched *ts;
954 struct rqbits *rqb;
955 struct rqhead *rqh;
955 struct rqbits *rqb;
956 struct rqhead *rqh;
957 struct thread *td;
956 int first;
957 int bit;
958 int pri;
959 int i;
960
961 rqb = &rq->rq_status;
962 bit = start & (RQB_BPW -1);
963 pri = 0;

--- 7 unchanged lines hidden (view full) ---

971 if (rqb->rqb_bits[i] & (1ul << pri))
972 break;
973 if (pri >= RQB_BPW)
974 continue;
975 } else
976 pri = RQB_FFS(rqb->rqb_bits[i]);
977 pri += (i << RQB_L2BPW);
978 rqh = &rq->rq_queues[pri];
958 int first;
959 int bit;
960 int pri;
961 int i;
962
963 rqb = &rq->rq_status;
964 bit = start & (RQB_BPW -1);
965 pri = 0;

--- 7 unchanged lines hidden (view full) ---

973 if (rqb->rqb_bits[i] & (1ul << pri))
974 break;
975 if (pri >= RQB_BPW)
976 continue;
977 } else
978 pri = RQB_FFS(rqb->rqb_bits[i]);
979 pri += (i << RQB_L2BPW);
980 rqh = &rq->rq_queues[pri];
979 TAILQ_FOREACH(ts, rqh, ts_procq) {
980 if (first && THREAD_CAN_MIGRATE(ts->ts_thread) &&
981 THREAD_CAN_SCHED(ts->ts_thread, cpu))
982 return (ts);
981 TAILQ_FOREACH(td, rqh, td_runq) {
982 if (first && THREAD_CAN_MIGRATE(td) &&
983 THREAD_CAN_SCHED(td, cpu))
984 return (td);
983 first = 1;
984 }
985 }
986 if (start != 0) {
987 start = 0;
988 goto again;
989 }
990
991 return (NULL);
992}
993
994/*
995 * Steals load from a standard linear queue.
996 */
985 first = 1;
986 }
987 }
988 if (start != 0) {
989 start = 0;
990 goto again;
991 }
992
993 return (NULL);
994}
995
996/*
997 * Steals load from a standard linear queue.
998 */
997static struct td_sched *
999static struct thread *
998runq_steal(struct runq *rq, int cpu)
999{
1000 struct rqhead *rqh;
1001 struct rqbits *rqb;
1000runq_steal(struct runq *rq, int cpu)
1001{
1002 struct rqhead *rqh;
1003 struct rqbits *rqb;
1002 struct td_sched *ts;
1004 struct thread *td;
1003 int word;
1004 int bit;
1005
1006 rqb = &rq->rq_status;
1007 for (word = 0; word < RQB_LEN; word++) {
1008 if (rqb->rqb_bits[word] == 0)
1009 continue;
1010 for (bit = 0; bit < RQB_BPW; bit++) {
1011 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
1012 continue;
1013 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
1005 int word;
1006 int bit;
1007
1008 rqb = &rq->rq_status;
1009 for (word = 0; word < RQB_LEN; word++) {
1010 if (rqb->rqb_bits[word] == 0)
1011 continue;
1012 for (bit = 0; bit < RQB_BPW; bit++) {
1013 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
1014 continue;
1015 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
1014 TAILQ_FOREACH(ts, rqh, ts_procq)
1015 if (THREAD_CAN_MIGRATE(ts->ts_thread) &&
1016 THREAD_CAN_SCHED(ts->ts_thread, cpu))
1017 return (ts);
1016 TAILQ_FOREACH(td, rqh, td_runq)
1017 if (THREAD_CAN_MIGRATE(td) &&
1018 THREAD_CAN_SCHED(td, cpu))
1019 return (td);
1018 }
1019 }
1020 return (NULL);
1021}
1022
1023/*
1024 * Attempt to steal a thread in priority order from a thread queue.
1025 */
1020 }
1021 }
1022 return (NULL);
1023}
1024
1025/*
1026 * Attempt to steal a thread in priority order from a thread queue.
1027 */
1026static struct td_sched *
1028static struct thread *
1027tdq_steal(struct tdq *tdq, int cpu)
1028{
1029tdq_steal(struct tdq *tdq, int cpu)
1030{
1029 struct td_sched *ts;
1031 struct thread *td;
1030
1031 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1032
1033 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1032 if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
1033 return (ts);
1034 if ((ts = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx))
1035 != NULL)
1036 return (ts);
1034 if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
1035 return (td);
1036 if ((td = runq_steal_from(&tdq->tdq_timeshare,
1037 cpu, tdq->tdq_ridx)) != NULL)
1038 return (td);
1037 return (runq_steal(&tdq->tdq_idle, cpu));
1038}
1039
1040/*
1041 * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the
1042 * current lock and returns with the assigned queue locked.
1043 */
1044static inline struct tdq *
1039 return (runq_steal(&tdq->tdq_idle, cpu));
1040}
1041
1042/*
1043 * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the
1044 * current lock and returns with the assigned queue locked.
1045 */
1046static inline struct tdq *
1045sched_setcpu(struct td_sched *ts, int cpu, int flags)
1047sched_setcpu(struct thread *td, int cpu, int flags)
1046{
1048{
1047 struct thread *td;
1049
1048 struct tdq *tdq;
1049
1050 struct tdq *tdq;
1051
1050 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
1051
1052 THREAD_LOCK_ASSERT(td, MA_OWNED);
1052 tdq = TDQ_CPU(cpu);
1053 tdq = TDQ_CPU(cpu);
1053 td = ts->ts_thread;
1054 ts->ts_cpu = cpu;
1055
1056 /* If the lock matches just return the queue. */
1054 td->td_sched->ts_cpu = cpu;
1055 /*
1056 * If the lock matches just return the queue.
1057 */
1057 if (td->td_lock == TDQ_LOCKPTR(tdq))
1058 return (tdq);
1059#ifdef notyet
1060 /*
1061 * If the thread isn't running its lockptr is a
1062 * turnstile or a sleepqueue. We can just lock_set without
1063 * blocking.
1064 */

--- 9 unchanged lines hidden (view full) ---

1074 */
1075 thread_lock_block(td);
1076 TDQ_LOCK(tdq);
1077 thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1078 return (tdq);
1079}
1080
1081static int
1058 if (td->td_lock == TDQ_LOCKPTR(tdq))
1059 return (tdq);
1060#ifdef notyet
1061 /*
1062 * If the thread isn't running its lockptr is a
1063 * turnstile or a sleepqueue. We can just lock_set without
1064 * blocking.
1065 */

--- 9 unchanged lines hidden (view full) ---

1075 */
1076 thread_lock_block(td);
1077 TDQ_LOCK(tdq);
1078 thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1079 return (tdq);
1080}
1081
1082static int
1082sched_pickcpu(struct td_sched *ts, int flags)
1083sched_pickcpu(struct thread *td, int flags)
1083{
1084 struct cpu_group *cg;
1084{
1085 struct cpu_group *cg;
1085 struct thread *td;
1086 struct td_sched *ts;
1086 struct tdq *tdq;
1087 cpumask_t mask;
1088 int self;
1089 int pri;
1090 int cpu;
1091
1092 self = PCPU_GET(cpuid);
1087 struct tdq *tdq;
1088 cpumask_t mask;
1089 int self;
1090 int pri;
1091 int cpu;
1092
1093 self = PCPU_GET(cpuid);
1093 td = ts->ts_thread;
1094 ts = td->td_sched;
1094 if (smp_started == 0)
1095 return (self);
1096 /*
1097 * Don't migrate a running thread from sched_switch().
1098 */
1099 if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
1100 return (ts->ts_cpu);
1101 /*

--- 37 unchanged lines hidden (view full) ---

1139 KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
1140 return (cpu);
1141}
1142#endif
1143
1144/*
1145 * Pick the highest priority task we have and return it.
1146 */
1095 if (smp_started == 0)
1096 return (self);
1097 /*
1098 * Don't migrate a running thread from sched_switch().
1099 */
1100 if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
1101 return (ts->ts_cpu);
1102 /*

--- 37 unchanged lines hidden (view full) ---

1140 KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
1141 return (cpu);
1142}
1143#endif
1144
1145/*
1146 * Pick the highest priority task we have and return it.
1147 */
1147static struct td_sched *
1148static struct thread *
1148tdq_choose(struct tdq *tdq)
1149{
1149tdq_choose(struct tdq *tdq)
1150{
1150 struct td_sched *ts;
1151 struct thread *td;
1151
1152 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1152
1153 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1153 ts = runq_choose(&tdq->tdq_realtime);
1154 if (ts != NULL)
1155 return (ts);
1156 ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1157 if (ts != NULL) {
1158 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1154 td = runq_choose(&tdq->tdq_realtime);
1155 if (td != NULL)
1156 return (td);
1157 td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1158 if (td != NULL) {
1159 KASSERT(td->td_priority >= PRI_MIN_TIMESHARE,
1159 ("tdq_choose: Invalid priority on timeshare queue %d",
1160 ("tdq_choose: Invalid priority on timeshare queue %d",
1160 ts->ts_thread->td_priority));
1161 return (ts);
1161 td->td_priority));
1162 return (td);
1162 }
1163 }
1163
1164 ts = runq_choose(&tdq->tdq_idle);
1165 if (ts != NULL) {
1166 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1164 td = runq_choose(&tdq->tdq_idle);
1165 if (td != NULL) {
1166 KASSERT(td->td_priority >= PRI_MIN_IDLE,
1167 ("tdq_choose: Invalid priority on idle queue %d",
1167 ("tdq_choose: Invalid priority on idle queue %d",
1168 ts->ts_thread->td_priority));
1169 return (ts);
1168 td->td_priority));
1169 return (td);
1170 }
1171
1172 return (NULL);
1173}
1174
1175/*
1176 * Initialize a thread queue.
1177 */

--- 55 unchanged lines hidden (view full) ---

1233 */
1234 realstathz = hz;
1235 sched_slice = (realstathz/10); /* ~100ms */
1236 tickincr = 1 << SCHED_TICK_SHIFT;
1237
1238 /* Add thread0's load since it's running. */
1239 TDQ_LOCK(tdq);
1240 thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1170 }
1171
1172 return (NULL);
1173}
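tdq_choose() above drains the realtime queue first, then the timeshare calendar starting at the rotating tdq_ridx head, and only then the idle queue; the KASSERTs enforce that each queue holds only priorities from its own band. The same ordering condensed into a self-contained sketch (the stubs below merely stand in for the real run-queue primitives on the r177435 side of the diff):

#include <stddef.h>

struct runq;                            /* opaque placeholders for the sketch */
struct thread;

static struct thread *
runq_choose_stub(struct runq *rq)
{
        (void)rq;
        return (NULL);
}

static struct thread *
runq_choose_from_stub(struct runq *rq, unsigned char idx)
{
        (void)rq;
        (void)idx;
        return (NULL);
}

/* Condensed selection order: realtime, then timeshare at ridx, then idle. */
static struct thread *
choose_order(struct runq *realtime, struct runq *timeshare, struct runq *idle,
    unsigned char ridx)
{
        struct thread *td;

        if ((td = runq_choose_stub(realtime)) != NULL)
                return (td);
        if ((td = runq_choose_from_stub(timeshare, ridx)) != NULL)
                return (td);
        return (runq_choose_stub(idle));
}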
1174
1175/*
1176 * Initialize a thread queue.
1177 */

--- 55 unchanged lines hidden (view full) ---

1233 */
1234 realstathz = hz;
1235 sched_slice = (realstathz/10); /* ~100ms */
1236 tickincr = 1 << SCHED_TICK_SHIFT;
1237
1238 /* Add thread0's load since it's running. */
1239 TDQ_LOCK(tdq);
1240 thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1241 tdq_load_add(tdq, &td_sched0);
1241 tdq_load_add(tdq, &thread0);
1242 tdq->tdq_lowpri = thread0.td_priority;
1243 TDQ_UNLOCK(tdq);
1244}
1245
1246/*
1247 * This routine determines the tickincr after stathz and hz are setup.
1248 */
1249/* ARGSUSED */

--- 200 unchanged lines hidden (view full) ---

1450
1451 /*
1452 * Set up the scheduler specific parts of proc0.
1453 */
1454 proc0.p_sched = NULL; /* XXX */
1455 thread0.td_sched = &td_sched0;
1456 td_sched0.ts_ltick = ticks;
1457 td_sched0.ts_ftick = ticks;
1242 tdq->tdq_lowpri = thread0.td_priority;
1243 TDQ_UNLOCK(tdq);
1244}
1245
1246/*
1247 * This routine determines the tickincr after stathz and hz are setup.
1248 */
1249/* ARGSUSED */

--- 200 unchanged lines hidden (view full) ---

1450
1451 /*
1452 * Set up the scheduler specific parts of proc0.
1453 */
1454 proc0.p_sched = NULL; /* XXX */
1455 thread0.td_sched = &td_sched0;
1456 td_sched0.ts_ltick = ticks;
1457 td_sched0.ts_ftick = ticks;
1458 td_sched0.ts_thread = &thread0;
1459 td_sched0.ts_slice = sched_slice;
1460}
1461
1462/*
1463 * This is only somewhat accurate since given many processes of the same
1464 * priority they will switch when their slices run out, which will be
1465 * at most sched_slice stathz ticks.
1466 */

--- 211 unchanged lines hidden (view full) ---

1678 */
1679static struct mtx *
1680sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1681{
1682 struct tdq *tdn;
1683
1684 tdn = TDQ_CPU(td->td_sched->ts_cpu);
1685#ifdef SMP
1458 td_sched0.ts_slice = sched_slice;
1459}
1460
1461/*
1462 * This is only somewhat accurate since given many processes of the same
1463 * priority they will switch when their slices run out, which will be
1464 * at most sched_slice stathz ticks.
1465 */

--- 211 unchanged lines hidden (view full) ---

1677 */
1678static struct mtx *
1679sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1680{
1681 struct tdq *tdn;
1682
1683 tdn = TDQ_CPU(td->td_sched->ts_cpu);
1684#ifdef SMP
1686 tdq_load_rem(tdq, td->td_sched);
1685 tdq_load_rem(tdq, td);
1687 /*
1688 * Do the lock dance required to avoid LOR. We grab an extra
1689 * spinlock nesting to prevent preemption while we're
1690 * not holding either run-queue lock.
1691 */
1692 spinlock_enter();
1693 thread_block_switch(td); /* This releases the lock on tdq. */
1694 TDQ_LOCK(tdn);
1695 tdq_add(tdn, td, flags);
1686 /*
1687 * Do the lock dance required to avoid LOR. We grab an extra
1688 * spinlock nesting to prevent preemption while we're
1689 * not holding either run-queue lock.
1690 */
1691 spinlock_enter();
1692 thread_block_switch(td); /* This releases the lock on tdq. */
1693 TDQ_LOCK(tdn);
1694 tdq_add(tdn, td, flags);
1696 tdq_notify(tdn, td->td_sched);
1695 tdq_notify(tdn, td);
1697 /*
1698 * After we unlock tdn the new cpu still can't switch into this
1699 * thread until we've unblocked it in cpu_switch(). The lock
1700 * pointers may match in the case of HTT cores. Don't unlock here
1701 * or we can deadlock when the other CPU runs the IPI handler.
1702 */
1703 if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1704 TDQ_UNLOCK(tdn);

--- 49 unchanged lines hidden (view full) ---

1754 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1755 TD_SET_CAN_RUN(td);
1756 } else if (TD_IS_RUNNING(td)) {
1757 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1758 srqflag = (flags & SW_PREEMPT) ?
1759 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1760 SRQ_OURSELF|SRQ_YIELDING;
1761 if (ts->ts_cpu == cpuid)
1696 /*
1697 * After we unlock tdn the new cpu still can't switch into this
1698 * thread until we've unblocked it in cpu_switch(). The lock
1699 * pointers may match in the case of HTT cores. Don't unlock here
1700 * or we can deadlock when the other CPU runs the IPI handler.
1701 */
1702 if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1703 TDQ_UNLOCK(tdn);

--- 49 unchanged lines hidden (view full) ---

1753 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1754 TD_SET_CAN_RUN(td);
1755 } else if (TD_IS_RUNNING(td)) {
1756 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1757 srqflag = (flags & SW_PREEMPT) ?
1758 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1759 SRQ_OURSELF|SRQ_YIELDING;
1760 if (ts->ts_cpu == cpuid)
1762 tdq_runq_add(tdq, ts, srqflag);
1761 tdq_runq_add(tdq, td, srqflag);
1763 else
1764 mtx = sched_switch_migrate(tdq, td, srqflag);
1765 } else {
1766 /* This thread must be going to sleep. */
1767 TDQ_LOCK(tdq);
1768 mtx = thread_block_switch(td);
1762 else
1763 mtx = sched_switch_migrate(tdq, td, srqflag);
1764 } else {
1765 /* This thread must be going to sleep. */
1766 TDQ_LOCK(tdq);
1767 mtx = thread_block_switch(td);
1769 tdq_load_rem(tdq, ts);
1768 tdq_load_rem(tdq, td);
1770 }
1771 /*
1772 * We enter here with the thread blocked and assigned to the
1773 * appropriate cpu run-queue or sleep-queue and with the current
1774 * thread-queue locked.
1775 */
1776 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1777 newtd = choosethread();

--- 129 unchanged lines hidden (view full) ---

1907 THREAD_LOCK_ASSERT(td, MA_OWNED);
1908 /*
1909 * Initialize child.
1910 */
1911 ts = td->td_sched;
1912 ts2 = child->td_sched;
1913 child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
1914 child->td_cpuset = cpuset_ref(td->td_cpuset);
1769 }
1770 /*
1771 * We enter here with the thread blocked and assigned to the
1772 * appropriate cpu run-queue or sleep-queue and with the current
1773 * thread-queue locked.
1774 */
1775 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1776 newtd = choosethread();

--- 129 unchanged lines hidden (view full) ---

1906 THREAD_LOCK_ASSERT(td, MA_OWNED);
1907 /*
1908 * Initialize child.
1909 */
1910 ts = td->td_sched;
1911 ts2 = child->td_sched;
1912 child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
1913 child->td_cpuset = cpuset_ref(td->td_cpuset);
1915 ts2->ts_thread = child;
1916 ts2->ts_cpu = ts->ts_cpu;
1917 ts2->ts_flags = 0;
1918 /*
1919 * Grab our parents cpu estimation information and priority.
1920 */
1921 ts2->ts_ticks = ts->ts_ticks;
1922 ts2->ts_ltick = ts->ts_ltick;
1923 ts2->ts_ftick = ts->ts_ftick;

--- 208 unchanged lines hidden (view full) ---

2132/*
2133 * Choose the highest priority thread to run. The thread is removed from
2134 * the run-queue while running however the load remains. For SMP we set
2135 * the tdq in the global idle bitmask if it idles here.
2136 */
2137struct thread *
2138sched_choose(void)
2139{
1914 ts2->ts_cpu = ts->ts_cpu;
1915 ts2->ts_flags = 0;
1916 /*
1917 * Grab our parents cpu estimation information and priority.
1918 */
1919 ts2->ts_ticks = ts->ts_ticks;
1920 ts2->ts_ltick = ts->ts_ltick;
1921 ts2->ts_ftick = ts->ts_ftick;

--- 208 unchanged lines hidden (view full) ---

2130/*
2131 * Choose the highest priority thread to run. The thread is removed from
2132 * the run-queue while running however the load remains. For SMP we set
2133 * the tdq in the global idle bitmask if it idles here.
2134 */
2135struct thread *
2136sched_choose(void)
2137{
2140 struct td_sched *ts;
2138 struct thread *td;
2141 struct tdq *tdq;
2142
2143 tdq = TDQ_SELF();
2144 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2139 struct tdq *tdq;
2140
2141 tdq = TDQ_SELF();
2142 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2145 ts = tdq_choose(tdq);
2146 if (ts) {
2147 ts->ts_ltick = ticks;
2148 tdq_runq_rem(tdq, ts);
2149 return (ts->ts_thread);
2143 td = tdq_choose(tdq);
2144 if (td) {
2145 td->td_sched->ts_ltick = ticks;
2146 tdq_runq_rem(tdq, td);
2147 return (td);
2150 }
2151 return (PCPU_GET(idlethread));
2152}
2153
2154/*
2155 * Set owepreempt if necessary. Preemption never happens directly in ULE,
2156 * we always request it once we exit a critical section.
2157 */

--- 21 unchanged lines hidden (view full) ---

2179/*
2180 * Add a thread to a thread queue. Select the appropriate runq and add the
2181 * thread to it. This is the internal function called when the tdq is
2182 * predetermined.
2183 */
2184void
2185tdq_add(struct tdq *tdq, struct thread *td, int flags)
2186{
2148 }
2149 return (PCPU_GET(idlethread));
2150}
2151
2152/*
2153 * Set owepreempt if necessary. Preemption never happens directly in ULE,
2154 * we always request it once we exit a critical section.
2155 */

--- 21 unchanged lines hidden (view full) ---

2177/*
2178 * Add a thread to a thread queue. Select the appropriate runq and add the
2179 * thread to it. This is the internal function called when the tdq is
2180 * predetermined.
2181 */
2182void
2183tdq_add(struct tdq *tdq, struct thread *td, int flags)
2184{
2187 struct td_sched *ts;
2188
2189 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2190 KASSERT((td->td_inhibitors == 0),
2191 ("sched_add: trying to run inhibited thread"));
2192 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2193 ("sched_add: bad thread state"));
2194 KASSERT(td->td_flags & TDF_INMEM,
2195 ("sched_add: thread swapped out"));
2196
2185
2186 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2187 KASSERT((td->td_inhibitors == 0),
2188 ("sched_add: trying to run inhibited thread"));
2189 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2190 ("sched_add: bad thread state"));
2191 KASSERT(td->td_flags & TDF_INMEM,
2192 ("sched_add: thread swapped out"));
2193
2197 ts = td->td_sched;
2198 if (td->td_priority < tdq->tdq_lowpri)
2199 tdq->tdq_lowpri = td->td_priority;
2194 if (td->td_priority < tdq->tdq_lowpri)
2195 tdq->tdq_lowpri = td->td_priority;
2200 tdq_runq_add(tdq, ts, flags);
2201 tdq_load_add(tdq, ts);
2196 tdq_runq_add(tdq, td, flags);
2197 tdq_load_add(tdq, td);
2202}
2203
2204/*
2205 * Select the target thread queue and add a thread to it. Request
2206 * preemption or IPI a remote processor if required.
2207 */
2208void
2209sched_add(struct thread *td, int flags)
2210{
2211 struct tdq *tdq;
2212#ifdef SMP
2198}
2199
2200/*
2201 * Select the target thread queue and add a thread to it. Request
2202 * preemption or IPI a remote processor if required.
2203 */
2204void
2205sched_add(struct thread *td, int flags)
2206{
2207 struct tdq *tdq;
2208#ifdef SMP
2213 struct td_sched *ts;
2214 int cpu;
2215#endif
2216 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2217 td, td->td_name, td->td_priority, curthread,
2218 curthread->td_name);
2219 THREAD_LOCK_ASSERT(td, MA_OWNED);
2220 /*
2221 * Recalculate the priority before we select the target cpu or
2222 * run-queue.
2223 */
2224 if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2225 sched_priority(td);
2226#ifdef SMP
2227 /*
2228 * Pick the destination cpu and if it isn't ours transfer to the
2229 * target cpu.
2230 */
2209 int cpu;
2210#endif
2211 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2212 td, td->td_name, td->td_priority, curthread,
2213 curthread->td_name);
2214 THREAD_LOCK_ASSERT(td, MA_OWNED);
2215 /*
2216 * Recalculate the priority before we select the target cpu or
2217 * run-queue.
2218 */
2219 if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2220 sched_priority(td);
2221#ifdef SMP
2222 /*
2223 * Pick the destination cpu and if it isn't ours transfer to the
2224 * target cpu.
2225 */
2231 ts = td->td_sched;
2232 cpu = sched_pickcpu(ts, flags);
2233 tdq = sched_setcpu(ts, cpu, flags);
2226 cpu = sched_pickcpu(td, flags);
2227 tdq = sched_setcpu(td, cpu, flags);
2234 tdq_add(tdq, td, flags);
2235 if (cpu != PCPU_GET(cpuid)) {
2228 tdq_add(tdq, td, flags);
2229 if (cpu != PCPU_GET(cpuid)) {
2236 tdq_notify(tdq, ts);
2230 tdq_notify(tdq, td);
2237 return;
2238 }
2239#else
2240 tdq = TDQ_SELF();
2241 TDQ_LOCK(tdq);
2242 /*
2243 * Now that the thread is moving to the run-queue, set the lock
2244 * to the scheduler's lock.

--- 9 unchanged lines hidden (view full) ---

2254 * Remove a thread from a run-queue without running it. This is used
2255 * when we're stealing a thread from a remote queue. Otherwise all threads
2256 * exit by calling sched_exit_thread() and sched_throw() themselves.
2257 */
2258void
2259sched_rem(struct thread *td)
2260{
2261 struct tdq *tdq;
2231 return;
2232 }
2233#else
2234 tdq = TDQ_SELF();
2235 TDQ_LOCK(tdq);
2236 /*
2237 * Now that the thread is moving to the run-queue, set the lock
2238 * to the scheduler's lock.

--- 9 unchanged lines hidden (view full) ---

2248 * Remove a thread from a run-queue without running it. This is used
2249 * when we're stealing a thread from a remote queue. Otherwise all threads
2250 * exit by calling sched_exit_thread() and sched_throw() themselves.
2251 */
2252void
2253sched_rem(struct thread *td)
2254{
2255 struct tdq *tdq;
2262 struct td_sched *ts;
2263
2264 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2265 td, td->td_name, td->td_priority, curthread,
2266 curthread->td_name);
2256
2257 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2258 td, td->td_name, td->td_priority, curthread,
2259 curthread->td_name);
2267 ts = td->td_sched;
2268 tdq = TDQ_CPU(ts->ts_cpu);
2260 tdq = TDQ_CPU(td->td_sched->ts_cpu);
2269 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2270 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2271 KASSERT(TD_ON_RUNQ(td),
2272 ("sched_rem: thread not on run queue"));
2261 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2262 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2263 KASSERT(TD_ON_RUNQ(td),
2264 ("sched_rem: thread not on run queue"));
2273 tdq_runq_rem(tdq, ts);
2274 tdq_load_rem(tdq, ts);
2265 tdq_runq_rem(tdq, td);
2266 tdq_load_rem(tdq, td);
2275 TD_SET_CAN_RUN(td);
2276 if (td->td_priority == tdq->tdq_lowpri)
2277 tdq_setlowpri(tdq, NULL);
2278}
2279
2280/*
2281 * Fetch cpu utilization information. Updates on demand.
2282 */

--- 43 unchanged lines hidden (view full) ---

2326 if (!THREAD_CAN_MIGRATE(td))
2327 return;
2328 /*
2329 * Assign the new cpu and force a switch before returning to
2330 * userspace. If the target thread is not running locally send
2331 * an ipi to force the issue.
2332 */
2333 cpu = ts->ts_cpu;
2267 TD_SET_CAN_RUN(td);
2268 if (td->td_priority == tdq->tdq_lowpri)
2269 tdq_setlowpri(tdq, NULL);
2270}
2271
2272/*
2273 * Fetch cpu utilization information. Updates on demand.
2274 */

--- 43 unchanged lines hidden (view full) ---

2318 if (!THREAD_CAN_MIGRATE(td))
2319 return;
2320 /*
2321 * Assign the new cpu and force a switch before returning to
2322 * userspace. If the target thread is not running locally send
2323 * an ipi to force the issue.
2324 */
2325 cpu = ts->ts_cpu;
2334 ts->ts_cpu = sched_pickcpu(ts, 0);
2326 ts->ts_cpu = sched_pickcpu(td, 0);
2335 if (cpu != PCPU_GET(cpuid))
2336 ipi_selected(1 << cpu, IPI_PREEMPT);
2337#endif
2338}
2339
2340/*
2341 * Bind a thread to a target cpu.
2342 */

--- 115 unchanged lines hidden (view full) ---

2458
2459 tdq = TDQ_SELF();
2460 if (td == NULL) {
2461 /* Correct spinlock nesting and acquire the correct lock. */
2462 TDQ_LOCK(tdq);
2463 spinlock_exit();
2464 } else {
2465 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2327 if (cpu != PCPU_GET(cpuid))
2328 ipi_selected(1 << cpu, IPI_PREEMPT);
2329#endif
2330}
2331
2332/*
2333 * Bind a thread to a target cpu.
2334 */

--- 115 unchanged lines hidden (view full) ---

2450
2451 tdq = TDQ_SELF();
2452 if (td == NULL) {
2453 /* Correct spinlock nesting and acquire the correct lock. */
2454 TDQ_LOCK(tdq);
2455 spinlock_exit();
2456 } else {
2457 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2466 tdq_load_rem(tdq, td->td_sched);
2458 tdq_load_rem(tdq, td);
2467 lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
2468 }
2469 KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2470 newtd = choosethread();
2471 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2472 PCPU_SET(switchtime, cpu_ticks());
2473 PCPU_SET(switchticks, ticks);
2474 cpu_throw(td, newtd); /* doesn't return */

--- 21 unchanged lines hidden (view full) ---

2496 td->td_lock = TDQ_LOCKPTR(tdq);
2497 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2498 td->td_oncpu = cpuid;
2499 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2500 lock_profile_obtain_lock_success(
2501 &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2502}
2503
2459 lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
2460 }
2461 KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2462 newtd = choosethread();
2463 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2464 PCPU_SET(switchtime, cpu_ticks());
2465 PCPU_SET(switchticks, ticks);
2466 cpu_throw(td, newtd); /* doesn't return */

--- 21 unchanged lines hidden (view full) ---

2488 td->td_lock = TDQ_LOCKPTR(tdq);
2489 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2490 td->td_oncpu = cpuid;
2491 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2492 lock_profile_obtain_lock_success(
2493 &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2494}
2495
2504static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2505 "Scheduler");
2496SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
2506SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2507 "Scheduler name");
2508SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2509 "Slice size for timeshare threads");
2510SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2511 "Interactivity score threshold");
2512SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2513 0,"Min priority for preemption, lower priorities have greater precedence");

--- 13 unchanged lines hidden (view full) ---

2527 "Attempts to steal work from other cores before idling");
2528SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2529 "Minimum load on remote cpu before we'll steal");
2530#endif
2531
2532/* ps compat. All cpu percentages from ULE are weighted. */
2533static int ccpu = 0;
2534SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2497SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2498 "Scheduler name");
2499SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2500 "Slice size for timeshare threads");
2501SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2502 "Interactivity score threshold");
2503SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2504 0,"Min priority for preemption, lower priorities have greater precedence");

--- 13 unchanged lines hidden (view full) ---

2518 "Attempts to steal work from other cores before idling");
2519SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2520 "Minimum load on remote cpu before we'll steal");
2521#endif
2522
2523/* ps compat. All cpu percentages from ULE are weighted. */
2524static int ccpu = 0;
2525SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2535
2536
2537#define KERN_SWITCH_INCLUDE 1
2538#include "kern/kern_switch.c"
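The SYSCTL declarations near the end export the scheduler's knobs under the kern.sched tree. A small FreeBSD userland example that reads two of them via sysctlbyname(3); the OID names follow directly from the declarations above, and error handling is kept minimal:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        char name[32];
        int slice;
        size_t len;

        len = sizeof(name);
        if (sysctlbyname("kern.sched.name", name, &len, NULL, 0) == 0)
                printf("scheduler: %s\n", name);        /* expected to report ULE */

        len = sizeof(slice);
        if (sysctlbyname("kern.sched.slice", &slice, &len, NULL, 0) == 0)
                printf("kern.sched.slice: %d\n", slice);
        return (0);
}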