sched_ule.c revision 135059
/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 135059 2004-09-11 00:11:09Z julian $");

#include <opt_sched.h>

#define	kse td_sched

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define	KTR_ULE	KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef PREEMPTION
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel PREEMPTION is unstable under SCHED_ULE.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif

/*
 * The schedulable entity that can be given a context to run.
 * A process may have several of these.  Probably one per processor
 * but possibly a few more.  In this universe they are grouped
 * with a KSEG that contains the priority and niceness
 * for the group.
 */
struct kse {
	TAILQ_ENTRY(kse) ke_kglist;	/* (*) Queue of threads in ke_ksegrp. */
	TAILQ_ENTRY(kse) ke_kgrlist;	/* (*) Queue of threads in this state.*/
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ke_oncpu;	/* (j) Which cpu we are on. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_THREAD = 0x0,	/* slaved to thread state */
		KES_ONRUNQ
	} ke_state;			/* (j) thread sched specific status. */
	int		ke_slptime;
	int		ke_pinned;	/* (k) nested count, pinned to a cpu */
	int		ke_slice;
	struct runq	*ke_runq;
	u_char		ke_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ke_ltick;	/* Last tick that we were running on */
	int		ke_ftick;	/* First tick that we were running on */
	int		ke_ticks;	/* Tick count */

};

#define	td_kse			td_sched
#define	td_slptime		td_kse->ke_slptime
#define	ke_proc			ke_thread->td_proc
#define	ke_ksegrp		ke_thread->td_ksegrp

/* flags kept in ke_flags */
#define	KEF_SCHED0	0x00001	/* For scheduler-specific use. */
#define	KEF_SCHED1	0x00002	/* For scheduler-specific use. */
#define	KEF_SCHED2	0x00004	/* For scheduler-specific use. */
#define	KEF_SCHED3	0x00008	/* For scheduler-specific use. */
#define	KEF_DIDRUN	0x02000	/* Thread actually ran. */
#define	KEF_EXIT	0x04000	/* Thread is being killed. */

/*
 * These datastructures are allocated within their parent datastructure but
 * are scheduler specific.
 */

#define	ke_assign		ke_procq.tqe_next

#define	KEF_ASSIGNED	KEF_SCHED0	/* Thread is being migrated. */
#define	KEF_BOUND	KEF_SCHED1	/* Thread can not migrate. */
#define	KEF_XFERABLE	KEF_SCHED2	/* Thread was added as transferable. */
#define	KEF_HOLD	KEF_SCHED3	/* Thread is temporarily bound. */

struct kg_sched {
	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
					    /* the system scheduler. */
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
	int	skg_avail_opennings;	/* (j) Num unfilled slots in group. */
	int	skg_concurrency;	/* (j) Num threads requested in group. */
	int	skg_runq_threads;	/* (j) Num KSEs on runq. */
};
#define	kg_last_assigned	kg_sched->skg_last_assigned
#define	kg_avail_opennings	kg_sched->skg_avail_opennings
#define	kg_concurrency		kg_sched->skg_concurrency
#define	kg_runq_threads		kg_sched->skg_runq_threads
#define	kg_runtime		kg_sched->skg_runtime
#define	kg_slptime		kg_sched->skg_slptime

static struct kse kse0;
static struct kg_sched kg_sched0;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

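/*
 * Worked example (illustrative; assumes the usual 64-entry timeshare
 * priority range from <sys/priority.h>): a ksegrp whose interactivity
 * score is 25 maps to
 *	SCHED_PRI_INTERACT(25) = 25 * 64 / 100 = 16
 * so sched_priority() computes PRI_MIN_TIMESHARE + 16 + p_nice before
 * clamping the result to the timeshare range.
 */
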
/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_max)
#define	SCHED_SLICE_NTHRESH		(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))

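/*
 * Worked example (illustrative; uses the boot-time defaults slice_min = 1
 * and slice_max = 10 before sched_setup() rescales them): SLICE_RANGE is
 * then 10 and SLICE_NTHRESH is SCHED_PRI_NHALF - 1 = 19, so a kse whose
 * nice value sits 5 above the least nice kse on its queue receives
 *	SCHED_SLICE_NICE(5) = 10 - (5 * 10) / 19 = 8 ticks,
 * while one 19 above it receives 10 - 10 = 0 ticks and is requeued by
 * kseq_choose() until the nice window shifts.
 */
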
/*
 * This macro determines whether or not the thread belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int			ksq_transferable;
	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
	struct kseq_group	*ksq_group;	/* Our processor group. */
	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
#else
	int		ksq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
 * load balancer.
 */
struct kseq_group {
	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
	int	ksg_load;		/* Total load of this group. */
	int	ksg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
};
#endif

/*
 * One kse queue per processor.
 */
#ifdef SMP
static cpumask_t kseq_idle;
static int ksg_maxid;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq_group kseq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;

#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#define	KSEQ_ID(x)	((x) - kseq_cpu)
#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
#else	/* !SMP */
static struct kseq	kseq_cpu;

#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void slot_fill(struct ksegrp *kg);
static struct kse *sched_choose(void);	/* XXX Should be thread * */
static void sched_add_internal(struct thread *td, int preemptive);
static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
static void sched_interact_fork(struct ksegrp *kg);
static void sched_pctcpu_update(struct kse *ke);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_load_add(struct kseq *kseq, struct kse *ke);
static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
static struct kse *runq_steal(struct runq *rq);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct kseq_group *ksg);
static void sched_balance_pair(struct kseq *high, struct kseq *low);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_idled(struct kseq *kseq);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
/*
 * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
 * this, we can't pin interrupts to the cpu that they were delivered to,
 * otherwise all ithreads only run on CPU 0.
 */
#ifdef __i386__
#define	KSE_CAN_MIGRATE(ke, class)					\
    ((ke)->ke_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#else /* !__i386__ */
#define	KSE_CAN_MIGRATE(ke, class)					\
    ((class) != PRI_ITHD && (ke)->ke_pinned == 0 &&			\
    ((ke)->ke_flags & KEF_BOUND) == 0)
#endif /* !__i386__ */
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload: %d\n", kseq->ksq_load);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", kseq->ksq_transferable);
#endif
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static __inline void
kseq_runq_add(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
		kseq->ksq_transferable++;
		kseq->ksq_group->ksg_transferable++;
		ke->ke_flags |= KEF_XFERABLE;
	}
#endif
	runq_add(ke->ke_runq, ke);
}

static __inline void
kseq_runq_rem(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (ke->ke_flags & KEF_XFERABLE) {
		kseq->ksq_transferable--;
		kseq->ksq_group->ksg_transferable--;
		ke->ke_flags &= ~KEF_XFERABLE;
	}
#endif
	runq_remove(ke->ke_runq, ke);
}

static void
kseq_load_add(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
	kseq->ksq_load++;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load++;
#else
		kseq->ksq_sysload++;
#endif
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_proc->p_nice);
}

static void
kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load--;
#else
		kseq->ksq_sysload--;
#endif
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm favors
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void)
{
	struct kseq_group *high;
	struct kseq_group *low;
	struct kseq_group *ksg;
	int cnt;
	int i;

	if (smp_started == 0)
		goto out;
	low = high = NULL;
	i = random() % (ksg_maxid + 1);
	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
		ksg = KSEQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || ksg->ksg_load > high->ksg_load)
		    && ksg->ksg_transferable)
			high = ksg;
		if (low == NULL || ksg->ksg_load < low->ksg_load)
			low = ksg;
		if (++i > ksg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->ksg_members),
		    LIST_FIRST(&low->ksg_members));
out:
	bal_tick = ticks + (random() % (hz * 2));
}

static void
sched_balance_groups(void)
{
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= ksg_maxid; i++)
			sched_balance_group(KSEQ_GROUP(i));
	gbal_tick = ticks + (random() % (hz * 2));
}

static void
sched_balance_group(struct kseq_group *ksg)
{
	struct kseq *kseq;
	struct kseq *high;
	struct kseq *low;
	int load;

	if (ksg->ksg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
		load = kseq->ksq_load;
		if (high == NULL || load > high->ksq_load)
			high = kseq;
		if (low == NULL || load < low->ksq_load)
			low = kseq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * kseq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->ksq_group == low->ksq_group) {
		transferable = high->ksq_transferable;
		high_load = high->ksq_load;
		low_load = low->ksq_load;
	} else {
		transferable = high->ksq_group->ksg_transferable;
		high_load = high->ksq_group->ksg_load;
		low_load = low->ksq_group->ksg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * kses we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		kseq_move(high, KSEQ_ID(low));
	return;
}

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kseq *kseq;
	struct kseq *to;
	struct kse *ke;

	kseq = from;
	to = KSEQ_CPU(cpu);
	ke = kseq_steal(kseq, 1);
	if (ke == NULL) {
		struct kseq_group *ksg;

		ksg = kseq->ksq_group;
		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
			if (kseq == from || kseq->ksq_transferable == 0)
				continue;
			ke = kseq_steal(kseq, 1);
			break;
		}
		if (ke == NULL)
			panic("kseq_move: No KSEs available with a "
			    "transferable count of %d\n",
			    ksg->ksg_transferable);
	}
	if (kseq == to)
		return;
	ke->ke_state = KES_THREAD;
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
	kseq_notify(ke, cpu);
}

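/*
 * Worked example for sched_balance_pair() above (illustrative): with a
 * busy queue load of 7 and an idle queue load of 2, diff = 5 and
 * move = 5 / 2 + 1 = 3, i.e. the odd remainder rounds up; move is then
 * clamped to the transferable count before kseq_move() is called that
 * many times.
 */
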
static int
kseq_idled(struct kseq *kseq)
{
	struct kseq_group *ksg;
	struct kseq *steal;
	struct kse *ke;

	ksg = kseq->ksq_group;
	/*
	 * If we're in a cpu group, try and steal kses from another cpu in
	 * the group before idling.
	 */
	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
			if (steal == kseq || steal->ksq_transferable == 0)
				continue;
			ke = kseq_steal(steal, 0);
			if (ke == NULL)
				continue;
			ke->ke_state = KES_THREAD;
			kseq_runq_rem(steal, ke);
			kseq_load_rem(steal, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			sched_add_internal(ke->ke_thread, 0);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a KSE bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	ksg->ksg_idlemask |= PCPU_GET(cpumask);
	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
		return (1);
	atomic_set_int(&kseq_idle, ksg->ksg_mask);
	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		*(volatile struct kse **)&ke = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		ke->ke_flags &= ~KEF_ASSIGNED;
		sched_add_internal(ke->ke_thread, 0);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;
	int prio;

	ke->ke_cpu = cpu;
	ke->ke_flags |= KEF_ASSIGNED;
	prio = ke->ke_thread->td_priority;

	kseq = KSEQ_CPU(cpu);

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	/*
	 * Without sched_lock we could lose a race where we set NEEDRESCHED
	 * on a thread that is switched out before the IPI is delivered.  This
	 * would lead us to miss the resched.  This will be a problem once
	 * sched_lock is pushed down.
	 */
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke,
				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
					return (ke);
			}
		}
	}
	return (NULL);
}

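/*
 * Sketch of the hand-off protocol used above (illustrative): ksq_assigned
 * is treated as a lock-free stack.  kseq_notify() pushes with a
 * compare-and-set loop, roughly:
 *
 *	do {
 *		ke->ke_assign = kseq->ksq_assigned;
 *	} while (!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
 *
 * and kseq_assign() swaps the whole list with NULL in one step, then adds
 * each kse on the local run queue.  Because ke_assign aliases the run
 * queue linkage (ke_procq.tqe_next), a kse must already be off its runq
 * when it is pushed here.
 */
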
static struct kse *
kseq_steal(struct kseq *kseq, int stealidle)
{
	struct kse *ke;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if (stealidle)
		return (runq_steal(&kseq->ksq_idle));
	return (NULL);
}

int
kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
{
	struct kseq_group *ksg;
	int cpu;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	/*
	 * If our load exceeds a certain threshold we should attempt to
	 * reassign this thread.  The first candidate is the cpu that
	 * originally ran the thread.  If it is idle, assign it there,
	 * otherwise, pick an idle cpu.
	 *
	 * The threshold at which we start to reassign kses has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	ksg = kseq->ksq_group;
	if (ksg->ksg_load > ksg->ksg_cpus && kseq_idle) {
		ksg = KSEQ_CPU(ke->ke_cpu)->ksq_group;
		if (kseq_idle & ksg->ksg_mask) {
			cpu = ffs(ksg->ksg_idlemask);
			if (cpu)
				goto migrate;
		}
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu)
			goto migrate;
	}
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to them after checking to see if there are idled groups.
	 */
	ksg = kseq->ksq_group;
	if (ksg->ksg_idlemask) {
		cpu = ffs(ksg->ksg_idlemask);
		if (cpu)
			goto migrate;
	}
	/*
	 * No new CPU was found.
	 */
	return (0);
migrate:
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	cpu--;
	ke->ke_runq = NULL;
	kseq_notify(ke, cpu);

	return (1);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
}

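/*
 * Note on the two timeshare queues initialized above (illustrative): when
 * a batch thread's slice expires, sched_clock() requeues it on ksq_next,
 * while interactive threads keep going back onto ksq_curr.  Once ksq_curr
 * is empty, kseq_choose() swaps the two pointers, so a compiler parked on
 * the "next" queue eventually runs even while an interactive shell keeps
 * being selected first.
 */
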
static void
sched_setup(void *dummy)
{
#ifdef SMP
	int balance_groups;
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the kseqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct kseq *ksq;

		ksq = &kseq_cpu[i];
		ksq->ksq_assigned = NULL;
		kseq_setup(&kseq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct kseq_group *ksg;
		struct kseq *ksq;

		for (i = 0; i < MAXCPU; i++) {
			ksq = &kseq_cpu[i];
			ksg = &kseq_groups[i];
			/*
			 * Setup a kseq group with one member.
			 */
			ksq->ksq_transferable = 0;
			ksq->ksq_group = ksg;
			ksg->ksg_cpus = 1;
			ksg->ksg_idlemask = 0;
			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			LIST_INIT(&ksg->ksg_members);
			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
		}
	} else {
		struct kseq_group *ksg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			ksg = &kseq_groups[i];
			/*
			 * Initialize the group.
			 */
			ksg->ksg_idlemask = 0;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			ksg->ksg_cpus = cg->cg_count;
			ksg->ksg_cpumask = cg->cg_mask;
			LIST_INIT(&ksg->ksg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (ksg->ksg_mask == 0)
						ksg->ksg_mask = 1 << j;
					kseq_cpu[j].ksq_transferable = 0;
					kseq_cpu[j].ksq_group = ksg;
					LIST_INSERT_HEAD(&ksg->ksg_members,
					    &kseq_cpu[j], ksq_siblings);
				}
			}
			if (ksg->ksg_cpus > 1)
				balance_groups = 1;
		}
		ksg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_load_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_proc->p_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get a minimal slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the ksegs nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_proc->p_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_INTERACTIVE;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}

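/*
 * Worked example for sched_interact_score() (illustrative; assumes
 * hz = 1000): history is kept in ticks << 10, so 3 seconds of sleep and
 * 1 second of running give kg_slptime = 3072000 and kg_runtime = 1024000.
 * Sleep dominates, so div = 3072000 / 50 = 61440 and the score is
 * 1024000 / 61440 = 16, below SCHED_INTERACT_THRESH (30), which keeps the
 * ksegrp on the current queue.  With the two histories swapped the score
 * becomes 50 + (50 - 16) = 84 and the group is treated as batch.
 */
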
/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	ksegrp0.kg_sched = &kg_sched0;
	proc0.p_sched = NULL; /* XXX */
	thread0.td_kse = &kse0;
	kse0.ke_thread = &thread0;
	kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
	kse0.ke_state = KES_THREAD;
	kg_sched0.skg_concurrency = 1;
	kg_sched0.skg_avail_opennings = 0; /* we are already running */
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
			    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke);
		}
		/*
		 * Hold this kse on this cpu so that sched_prio() doesn't
		 * cause excessive migration.  We only want migration to
		 * happen as the result of a wakeup.
		 */
		ke->ke_flags |= KEF_HOLD;
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_pflags &= ~TDP_OWEPREEMPT;

	/*
	 * If we bring in a thread, account for it as if it had been added to
	 * the run queue and then chosen.
	 */
	if (newtd) {
		newtd->td_ksegrp->kg_avail_opennings--;
		newtd->td_kse->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(newtd);
	}
	/*
	 * If the KSE has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
		if (td == PCPU_GET(idlethread)) {
			TD_SET_CAN_RUN(td);
		} else {
			/* We are ending our run so make our slot available again */
			td->td_ksegrp->kg_avail_opennings++;
			if (TD_IS_RUNNING(td)) {
				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
				/*
				 * Don't allow the thread to migrate
				 * from a preemption.
				 */
				ke->ke_flags |= KEF_HOLD;
				setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
			} else {
				if (ke->ke_runq) {
					kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
				} else if ((td->td_flags & TDF_IDLETD) == 0)
					kdb_backtrace();
				/*
				 * We will not be on the run queue.
				 * So we must be sleeping or similar.
				 */
				if (td->td_proc->p_flag & P_HADTHREADS)
					slot_fill(td->td_ksegrp);
			}
		}
	}
	if (newtd != NULL)
		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
	else
		newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_pri_class == PRI_TIMESHARE) {
			FOREACH_THREAD_IN_GROUP(kg, td) {
				ke = td->td_kse;
				if (ke->ke_runq == NULL)
					continue;
				kseq = KSEQ_CPU(ke->ke_cpu);
				kseq_nice_rem(kseq, p->p_nice);
				kseq_nice_add(kseq, nice);
			}
		}
	}
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_priority(kg);
		FOREACH_THREAD_IN_GROUP(kg, td)
			td->td_flags |= TDF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_base_pri = td->td_priority;

	CTR2(KTR_ULE, "sleep thread %p (tick: %d)",
	    td, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup thread %p (%d ticks)", td, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td, SRQ_BORING);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct thread *childtd)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(td, childtd->td_ksegrp);
	sched_fork_thread(td, childtd);
}

void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
	struct ksegrp *kg = td->td_ksegrp;
	mtx_assert(&sched_lock, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
	struct kse *ke;
	struct kse *ke2;

	sched_newthread(child);
	ke = td->td_kse;
	ke2 = child->td_kse;
	ke2->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	ke2->ke_cpu = ke->ke_cpu;
	ke2->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	ke2->ke_ticks = ke->ke_ticks;
	ke2->ke_ltick = ke->ke_ltick;
	ke2->ke_ftick = ke->ke_ftick;
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	struct thread *td;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
	FOREACH_THREAD_IN_GROUP(kg, td) {
		ke = td->td_kse;
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because we could be changing to or from an interrupt
		 * class.
		 */
		if (ke->ke_state == KES_ONRUNQ) {
			if (KSE_CAN_MIGRATE(ke, oclass)) {
				kseq->ksq_transferable--;
				kseq->ksq_group->ksg_transferable--;
			}
			if (KSE_CAN_MIGRATE(ke, nclass)) {
				kseq->ksq_transferable++;
				kseq->ksq_group->ksg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare--;
			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare++;
			kseq_nice_add(kseq, kg->kg_proc->p_nice);
		}
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 * Avoid using sched_exit_thread to avoid having to decide which
 * thread in the parent gets the honour since it isn't used.
 */
void
sched_exit(struct proc *p, struct thread *childtd)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
	kg->kg_runtime += td->td_ksegrp->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *childtd)
{
	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (ticks == bal_tick)
		sched_balance();
	if (ticks == gbal_tick)
		sched_balance_groups();
	/*
	 * We could have been assigned a non real-time thread without an
	 * IPI.
	 */
	if (kseq->ksq_assigned)
		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
#endif
	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick thread %p (slice: %d, slptime: %d, runtime: %d)",
	    td, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	if (--ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_load_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_load_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned) {
		mtx_lock_spin(&sched_lock);
		kseq_assign(kseq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
restart:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_idled(kseq) == 0)
				goto restart;
#endif
		kseq_runq_rem(kseq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run thread %p from %p (slice: %d, pri: %d)",
			    ke->ke_thread, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}
#ifdef SMP
	if (kseq_idled(kseq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td, int flags)
{

	/* let jeff work out how to map the flags better */
	/* I'm open to suggestions */
	if (flags & SRQ_YIELDING)
		/*
		 * Preempting during switching can be bad JUJU
		 * especially for KSE processes
		 */
		sched_add_internal(td, 0);
	else
		sched_add_internal(td, 1);
}

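/*
 * Worked example for the accounting in sched_clock() above (illustrative;
 * assumes hz = 1000 and stathz = 128, so tickincr = 1000 / 128 = 7): each
 * stat clock tick adds 7 << 10 to kg_runtime, and sched_interact_update()
 * begins scaling the history back once kg_runtime + kg_slptime reaches
 * SCHED_SLP_RUN_MAX = (1000 * 5) << 10, roughly five seconds of combined
 * run and sleep history.
 */
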
static void
sched_add_internal(struct thread *td, int preemptive)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
#ifdef SMP
	int canmigrate;
#endif
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	kseq = KSEQ_SELF();
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	class = PRI_BASE(kg->kg_pri_class);
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * Don't migrate running threads here.  Force the long term balancer
	 * to do it.
	 */
	canmigrate = KSE_CAN_MIGRATE(ke, class);
	if (ke->ke_flags & KEF_HOLD) {
		ke->ke_flags &= ~KEF_HOLD;
		canmigrate = 0;
	}
	/*
	 * If this thread is pinned or bound, notify the target cpu.
	 */
	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
		ke->ke_runq = NULL;
		kseq_notify(ke, ke->ke_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (kseq->ksq_group->ksg_idlemask ==
		    kseq->ksq_group->ksg_cpumask)
			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
	} else if (kseq->ksq_load > 1 && canmigrate)
		if (kseq_transfer(kseq, ke, class))
			return;
	ke->ke_cpu = PCPU_GET(cpuid);
#endif
	/*
	 * XXX With preemption this is not necessary.
	 */
	if (td->td_priority < curthread->td_priority &&
	    ke->ke_runq == kseq->ksq_curr)
		curthread->td_flags |= TDF_NEEDRESCHED;
	if (preemptive && maybe_preempt(td))
		return;
	ke->ke_ksegrp->kg_runq_threads++;
	ke->ke_state = KES_ONRUNQ;

	kseq_runq_add(kseq, ke);
	kseq_load_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;
	/*
	 * It is safe to just return here because sched_rem() is only ever
	 * used in places where we're immediately going to add the
	 * kse back on again.  In that case it'll be added with the correct
	 * thread and priority when the caller drops the sched_lock.
	 */
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_threads--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
		    ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rtick per second ? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_threads--;
	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
	kseq_notify(ke, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= ksg_maxid; i++)
		total += KSEQ_GROUP(i)->ksg_load;
	return (total);
#else
	return (KSEQ_SELF()->ksq_sysload);
#endif
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

void
sched_pin(void)
{
	curthread->td_sched->ke_pinned++;
}

void
sched_unpin(void)
{
	curthread->td_sched->ke_pinned--;
}

#ifdef INVARIANTS
int
sched_ispinned(void)
{
	return (curthread->td_sched->ke_pinned);
}
#endif

#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"