sched_4bsd.c (134586) vs. sched_4bsd.c (134591)
1/*-
2 * Copyright (c) 1982, 1986, 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.

--- 19 unchanged lines hidden ---

28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 134586 2004-09-01 02:11:28Z julian $");
36__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 134591 2004-09-01 06:42:02Z julian $");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/ktr.h>
42#include <sys/lock.h>
43#include <sys/kthread.h>
44#include <sys/mutex.h>

--- 648 unchanged lines hidden ---

693 kg->kg_slptime = 0;
694 setrunqueue(td, SRQ_BORING);
695}
696
697void
698sched_add(struct thread *td, int flags)
699{
700 struct kse *ke;
701#ifdef SMP
702 int forwarded = 0;
703 int cpu;
704#endif
705
706 ke = td->td_kse;
707 mtx_assert(&sched_lock, MA_OWNED);
708 KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
709 KASSERT((ke->ke_thread->td_kse != NULL),
710 ("sched_add: No KSE on thread"));
711 KASSERT(ke->ke_state != KES_ONRUNQ,
712 ("sched_add: kse %p (%s) already in run queue", ke,
713 ke->ke_proc->p_comm));
714 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
715 ("sched_add: process swapped out"));
716
717#ifdef SMP
714 /*
715 * Only try to preempt if the thread is unpinned or pinned to the
716 * current CPU.
717 */
718 if (KSE_CAN_MIGRATE(ke) || ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)])
719#endif
720 /*
721 * Don't try preempt if we are already switching.
722 * all hell might break loose.
723 */
724 if ((flags & SRQ_YIELDING) == 0)
725 if (maybe_preempt(td))
726 return;
727
728#ifdef SMP
729 if (KSE_CAN_MIGRATE(ke)) {
718 if (KSE_CAN_MIGRATE(ke)) {
730 CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
719 CTR2(KTR_RUNQ,
720 "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
721 cpu = NOCPU;
731 ke->ke_runq = &runq;
732 } else {
722 ke->ke_runq = &runq;
723 } else {
733 CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p)to pcpu runq", ke, td);
734 if (!SKE_RUNQ_PCPU(ke))
724 if (!SKE_RUNQ_PCPU(ke))
735 ke->ke_runq = &runq_pcpu[PCPU_GET(cpuid)];
725 ke->ke_runq = &runq_pcpu[(cpu = PCPU_GET(cpuid))];
726 else
727 cpu = td->td_lastcpu;
728 CTR3(KTR_RUNQ,
729 "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
736 }
737#else
738 CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
739 ke->ke_runq = &runq;
730 }
731#else
732 CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
733 ke->ke_runq = &runq;
734
740#endif
735#endif
736 /*
737 * If we are yielding (on the way out anyhow)
738 * or the thread being saved is US,
739 * then don't try to be smart about preemption
740 * or kicking off another CPU
741 * as it won't help and may hinder.
742 * In the YIELDING case, we are about to run whoever is
743 * being put in the queue anyhow, and in the
744 * OURSELF case, we are putting ourselves on the run queue
745 * which also only happens when we are about to yield.
746 */
747 if((flags & SRQ_YIELDING) == 0) {
748#ifdef SMP
749 cpumask_t me = PCPU_GET(cpumask);
750 int idle = idle_cpus_mask & me;
751 /*
752 * Only try to kick off another CPU if
753 * the thread is unpinned
754 * or pinned to another CPU,
755 * and there are other available and idle CPUs.
756 * If we are idle, then skip straight to preemption.
757 */
758 if ( (! idle) &&
759 (idle_cpus_mask & ~(hlt_cpus_mask | me)) &&
760 ( KSE_CAN_MIGRATE(ke) ||
761 ke->ke_runq != &runq_pcpu[PCPU_GET(cpuid)])) {
762 forwarded = forward_wakeup(cpu);
763 }
764 /*
765 * If we failed to kick off another CPU, then look to
766 * see if we should preempt this CPU. Only allow this
767 * if it is not pinned or IS pinned to this CPU.
768 * If we are the idle thread, we also try to preempt,
769 * as it will be quicker and, being idle, we won't
770 * lose by doing so.
771 */
772 if ((!forwarded) &&
773 (ke->ke_runq == &runq ||
774 ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)]))
775#endif
776
777 {
778 if (maybe_preempt(td))
779 return;
780 }
781 }
782 if ((td->td_proc->p_flag & P_NOLOAD) == 0)
783 sched_tdcnt++;
784 runq_add(ke->ke_runq, ke);
785 ke->ke_ksegrp->kg_runq_kses++;
786 ke->ke_state = KES_ONRUNQ;
787 maybe_resched(td);
788}
789

--- 155 unchanged lines hidden ---
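
The new sched_add() is easier to follow outside the interleaved diff. Below is a simplified, self-contained C sketch of the control flow that revision 134591 introduces: pick the run queue (global if the KSE can migrate, per-CPU otherwise), and then, unless the caller passed SRQ_YIELDING, first try to forward the wakeup to an idle CPU and only fall back to preempting the current CPU when no CPU was kicked and this CPU services the chosen queue. Everything named *_stub, plus cpu_idle[] and NCPU, is invented for illustration and is not the kernel API; the real code also consults idle_cpus_mask, hlt_cpus_mask and sched_lock, which the sketch leaves out.

/*
 * Illustrative sketch only.  It mirrors the ordering of decisions in the
 * new sched_add() but uses invented stand-ins (the *_stub names and the
 * cpu_idle[] array) instead of kernel state, locking and run queues.
 */
#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

struct kse_stub {
	bool	pinned;		/* bound to one CPU (i.e. !KSE_CAN_MIGRATE)? */
	int	pinned_cpu;	/* meaningful only when pinned is true */
};

static int	current_cpu = 0;	/* stand-in for PCPU_GET(cpuid) */
static bool	cpu_idle[NCPU] = { false, true, false, false };

/* Stand-in for forward_wakeup(): try to kick an idle CPU other than us. */
static bool
forward_wakeup_stub(void)
{
	int i;

	for (i = 0; i < NCPU; i++) {
		if (i != current_cpu && cpu_idle[i]) {
			printf("IPI: waking idle CPU %d\n", i);
			return (true);
		}
	}
	return (false);
}

/* Stand-in for maybe_preempt(): pretend the new thread always wins. */
static bool
maybe_preempt_stub(void)
{
	printf("preempting current thread on CPU %d\n", current_cpu);
	return (true);
}

/* The decision flow added by r134591, minus locking and bookkeeping. */
static void
sched_add_sketch(struct kse_stub *ke, bool yielding)
{
	bool global_runq = !ke->pinned;		/* KSE_CAN_MIGRATE(ke) */
	int cpu = global_runq ? -1 : ke->pinned_cpu;
	bool forwarded = false;

	if (!yielding) {			/* SRQ_YIELDING not set */
		/*
		 * First choice: if we are not idle ourselves and the
		 * thread could run elsewhere, wake up an idle CPU.
		 */
		if (!cpu_idle[current_cpu] &&
		    (global_runq || cpu != current_cpu))
			forwarded = forward_wakeup_stub();
		/*
		 * Fallback: preempt locally, but only if this CPU would
		 * service the chosen run queue anyway.
		 */
		if (!forwarded && (global_runq || cpu == current_cpu) &&
		    maybe_preempt_stub())
			return;			/* switched in directly */
	}
	printf("enqueued on %s run queue\n",
	    global_runq ? "the global" : "this CPU's");
}

int
main(void)
{
	struct kse_stub unpinned = { false, -1 };
	struct kse_stub pinned_here = { true, 0 };

	sched_add_sketch(&unpinned, false);	/* expect: forwarded wakeup */
	sched_add_sketch(&pinned_here, false);	/* expect: local preemption */
	return (0);
}

Run standalone, the first call forwards the wakeup to an idle CPU and still enqueues the thread, while the second (pinned to the current CPU) preempts locally and returns before the enqueue, matching the early return that precedes runq_add() in the real function.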