--- head/sys/kern/subr_sleepqueue.c	(revision 141616)
+++ head/sys/kern/subr_sleepqueue.c	(revision 145056)
 /*-
  * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

--- 48 unchanged lines hidden ---

  * pre-existing abuse of that API. The same lock must also be held when
  * awakening threads, though that is currently only enforced for condition
  * variables.
  */

 #include "opt_sleepqueue_profiling.h"

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 141616 2005-02-10 12:02:37Z phk $");
+__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 145056 2005-04-14 06:30:32Z jhb $");

 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/lock.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>

--- 67 unchanged lines hidden ---

 static MALLOC_DEFINE(M_SLEEPQUEUE, "sleep queues", "sleep queues");

 /*
  * Prototypes for non-exported routines.
  */
 static int     sleepq_check_timeout(void);
 static void    sleepq_switch(void *wchan);
 static void    sleepq_timeout(void *arg);
-static void    sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
-static void    sleepq_resume_thread(struct thread *td, int pri);
+static void    sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri);
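
Note: the prototype change above is the heart of r145056. The old two-step pair, sleepq_remove_thread() followed by a separate sleepq_resume_thread() that took sched_lock internally, is collapsed into a single sleepq_resume_thread() that takes the sleep queue, the thread, and an optional priority, and that must now be called with both the sleep queue chain lock and sched_lock held. A minimal sketch of the new calling convention (declarations and the lookup of td are elided; sleepq_signal() and sleepq_broadcast() below are the real in-tree users of this pattern):

        /* Sketch only: wake one known thread td sleeping on wchan. */
        sleepq_lock(wchan);                       /* lock the sleep queue chain */
        sq = sleepq_lookup(wchan);                /* NULL if nothing sleeps here */
        if (sq != NULL) {
                mtx_lock_spin(&sched_lock);       /* new precondition in r145056 */
                sleepq_resume_thread(sq, td, -1); /* -1 == do not boost priority */
                mtx_unlock_spin(&sched_lock);
        }
        sleepq_release(wchan);                    /* drop the chain lock */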

 /*
  * Early initialization of sleep queues that is called from the sleepinit()
  * SYSINIT.
  */
 void
 init_sleepqueues(void)
 {

--- 220 unchanged lines hidden ---

          * If there were pending signals and this thread is still on
          * the sleep queue, remove it from the sleep queue. If the
          * thread was removed from the sleep queue while we were blocked
          * above, then clear TDF_SINTR before returning.
          */
         sleepq_lock(wchan);
         sq = sleepq_lookup(wchan);
         mtx_lock_spin(&sched_lock);
-        if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
-                mtx_unlock_spin(&sched_lock);
-                sleepq_remove_thread(sq, td);
-        } else {
-                if (!TD_ON_SLEEPQ(td) && sig == 0)
-                        td->td_flags &= ~TDF_SINTR;
-                mtx_unlock_spin(&sched_lock);
-        }
+        if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0))
+                sleepq_resume_thread(sq, td, -1);
+        else if (!TD_ON_SLEEPQ(td) && sig == 0)
+                td->td_flags &= ~TDF_SINTR;
+        mtx_unlock_spin(&sched_lock);
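
Note: this hunk in sleepq_catch_signals() shows the semantic shift most clearly. Before, sched_lock was dropped first and sleepq_remove_thread() briefly re-took it internally, leaving a window with no lock held between dequeueing the thread and finishing the wakeup; now the whole operation runs under sched_lock and one unlock covers both branches. Reduced to its shape (a sketch distilled from the hunk, not literal diff content):

        /* r141616: two steps, sched_lock dropped in between. */
        mtx_unlock_spin(&sched_lock);
        sleepq_remove_thread(sq, td);     /* re-took sched_lock internally */

        /* r145056: one step, entirely under sched_lock. */
        sleepq_resume_thread(sq, td, -1);
        mtx_unlock_spin(&sched_lock);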
         return (sig);
 }

 /*
  * Switches to another thread if we are still asleep on a sleep queue and
  * drop the lock on the sleep queue chain. Returns with sched_lock held.
  */
 static void

--- 187 unchanged lines hidden ---

         mtx_unlock_spin(&sched_lock);
         if (signal_caught || rvalt == 0)
                 return (rvals);
         else
                 return (rvalt);
 }

 /*
- * Removes a thread from a sleep queue.
+ * Removes a thread from a sleep queue and makes it
+ * runnable.
  */
 static void
-sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
+sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
 {
         struct sleepqueue_chain *sc;

         MPASS(td != NULL);
         MPASS(sq->sq_wchan != NULL);
         MPASS(td->td_wchan == sq->sq_wchan);
         sc = SC_LOOKUP(sq->sq_wchan);
         mtx_assert(&sc->sc_lock, MA_OWNED);
+        mtx_assert(&sched_lock, MA_OWNED);

         /* Remove the thread from the queue. */
         TAILQ_REMOVE(&sq->sq_blocked, td, td_slpq);

         /*
          * Get a sleep queue for this thread. If this is the last waiter,
          * use the queue itself and take it out of the chain, otherwise,
          * remove a queue from the free list.

--- 5 unchanged lines hidden ---

 #endif
 #ifdef SLEEPQUEUE_PROFILING
         sc->sc_depth--;
 #endif
         } else
                 td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
         LIST_REMOVE(td->td_sleepqueue, sq_hash);

-        mtx_lock_spin(&sched_lock);
         td->td_wmesg = NULL;
         td->td_wchan = NULL;
-        mtx_unlock_spin(&sched_lock);
-}

-/*
- * Resumes a thread that was asleep on a queue.
- */
-static void
-sleepq_resume_thread(struct thread *td, int pri)
-{
-
         /*
          * Note that thread td might not be sleeping if it is running
          * sleepq_catch_signals() on another CPU or is blocked on
          * its proc lock to check signals. It doesn't hurt to clear
          * the sleeping flag if it isn't set though, so we just always
          * do it. However, we can't assert that it is set.
          */
-        mtx_lock_spin(&sched_lock);
         CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
             (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
         TD_CLR_SLEEPING(td);

         /* Adjust priority if requested. */
         MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
         if (pri != -1 && td->td_priority > pri)
                 sched_prio(td, pri);
         setrunnable(td);
-        mtx_unlock_spin(&sched_lock);
 }
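
Note: the merged function now asserts both of its locking preconditions on entry: the chain lock, as before, and sched_lock, new in this revision. That turns the calling convention into an enforced contract, so a caller still written against the old API fails fast. A hypothetical misuse that the new assertion catches (illustrative only, not from the diff):

        sleepq_lock(wchan);
        sq = sleepq_lookup(wchan);
        /* BUG under r145056: sched_lock is not held here. */
        sleepq_resume_thread(sq, td, -1);  /* trips mtx_assert(&sched_lock, MA_OWNED) */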

 /*
  * Find the highest priority thread sleeping on a wait channel and resume it.
  */
 void
 sleepq_signal(void *wchan, int flags, int pri)
 {

--- 17 unchanged lines hidden ---

          * the tail of sleep queues.
          */
         besttd = NULL;
         TAILQ_FOREACH(td, &sq->sq_blocked, td_slpq) {
                 if (besttd == NULL || td->td_priority < besttd->td_priority)
                         besttd = td;
         }
         MPASS(besttd != NULL);
-        sleepq_remove_thread(sq, besttd);
+        mtx_lock_spin(&sched_lock);
+        sleepq_resume_thread(sq, besttd, pri);
+        mtx_unlock_spin(&sched_lock);
         sleepq_release(wchan);
-        sleepq_resume_thread(besttd, pri);
 }
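
Note: sleepq_signal() is the first in-tree consumer of the new contract: it wraps the single resume in sched_lock and only then drops the chain lock. Callers presumably still lock the chain themselves before calling in, as sleepq_catch_signals() does with sleepq_lock(). A condition-variable-style use might look like this (sketch; the SLEEPQ_CONDVAR type flag and the &cv wait channel are assumptions drawn from the sq_type KASSERT below, not shown in this diff):

        /* Wake the highest-priority waiter on &cv, leaving priority alone. */
        sleepq_lock(&cv);                          /* chain lock, caller's duty */
        sleepq_signal(&cv, SLEEPQ_CONDVAR, -1);    /* drops the chain lock itself */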

 /*
  * Resume all threads sleeping on a specified wait channel.
  */
 void
 sleepq_broadcast(void *wchan, int flags, int pri)
 {
-        TAILQ_HEAD(, thread) list;
         struct sleepqueue *sq;
-        struct thread *td;

         CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
         KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
         sq = sleepq_lookup(wchan);
         if (sq == NULL) {
                 sleepq_release(wchan);
                 return;
         }
         KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
             ("%s: mismatch between sleep/wakeup and cv_*", __func__));

-        /* Move blocked threads from the sleep queue to a temporary list. */
-        TAILQ_INIT(&list);
-        while (!TAILQ_EMPTY(&sq->sq_blocked)) {
-                td = TAILQ_FIRST(&sq->sq_blocked);
-                sleepq_remove_thread(sq, td);
-                TAILQ_INSERT_TAIL(&list, td, td_slpq);
-        }
+        /* Resume all blocked threads on the sleep queue. */
+        mtx_lock_spin(&sched_lock);
+        while (!TAILQ_EMPTY(&sq->sq_blocked))
+                sleepq_resume_thread(sq, TAILQ_FIRST(&sq->sq_blocked), pri);
+        mtx_unlock_spin(&sched_lock);
         sleepq_release(wchan);
-
-        /* Resume all the threads on the temporary list. */
-        while (!TAILQ_EMPTY(&list)) {
-                td = TAILQ_FIRST(&list);
-                TAILQ_REMOVE(&list, td, td_slpq);
-                sleepq_resume_thread(td, pri);
-        }
 }
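
Note: sleepq_broadcast() loses its temporary list entirely. Since resuming no longer drops any lock mid-stream, the queue can be drained in place under sched_lock, and the loop terminates because sleepq_resume_thread() TAILQ_REMOVEs the thread it is handed, so TAILQ_FIRST() always yields a fresh head. The same drain idiom as a self-contained userland program (all names here are made up for illustration; only <sys/queue.h> is real):

        #include <sys/queue.h>
        #include <stdio.h>
        #include <stdlib.h>

        /* Stand-in for a sleeping thread. */
        struct waiter {
                int                     wa_prio;
                TAILQ_ENTRY(waiter)     wa_link;
        };

        TAILQ_HEAD(waitq, waiter);

        int
        main(void)
        {
                struct waitq wq = TAILQ_HEAD_INITIALIZER(wq);
                struct waiter *w;
                int i;

                for (i = 0; i < 3; i++) {
                        if ((w = malloc(sizeof(*w))) == NULL)
                                return (1);
                        w->wa_prio = 100 + i;
                        TAILQ_INSERT_TAIL(&wq, w, wa_link);
                }

                /*
                 * Same shape as the new sleepq_broadcast() loop: each
                 * "resume" removes the head, so the list shrinks to empty.
                 */
                while (!TAILQ_EMPTY(&wq)) {
                        w = TAILQ_FIRST(&wq);
                        TAILQ_REMOVE(&wq, w, wa_link);
                        printf("resuming waiter, prio %d\n", w->wa_prio);
                        free(w);
                }
                return (0);
        }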

 /*
  * Time sleeping threads out. When the timeout expires, the thread is
  * removed from the sleep queue and made runnable if it is still asleep.
  */
 static void
 sleepq_timeout(void *arg)

--- 31 unchanged lines hidden ---

          * call to callout_stop() to stop this routine would have failed
          * meaning that it would have already set TDF_TIMEOUT to
          * synchronize with this function.
          */
         if (TD_ON_SLEEPQ(td)) {
                 MPASS(td->td_wchan == wchan);
                 MPASS(sq != NULL);
                 td->td_flags |= TDF_TIMEOUT;
+                sleepq_resume_thread(sq, td, -1);
                 mtx_unlock_spin(&sched_lock);
-                sleepq_remove_thread(sq, td);
                 sleepq_release(wchan);
-                sleepq_resume_thread(td, -1);
                 return;
         } else if (wchan != NULL)
                 sleepq_release(wchan);

         /*
          * Now check for the edge cases. First, if TDF_TIMEOUT is set,
          * then the other thread has already yielded to us, so clear
          * the flag and resume it. If TDF_TIMEOUT is not set, then the

--- 30 unchanged lines hidden ---

         sleepq_lock(wchan);
         sq = sleepq_lookup(wchan);
         mtx_lock_spin(&sched_lock);
         if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
                 mtx_unlock_spin(&sched_lock);
                 sleepq_release(wchan);
                 return;
         }
-        mtx_unlock_spin(&sched_lock);
         MPASS(sq != NULL);

         /* Thread is asleep on sleep queue sq, so wake it up. */
-        sleepq_remove_thread(sq, td);
+        sleepq_resume_thread(sq, td, -1);
         sleepq_release(wchan);
-        sleepq_resume_thread(td, -1);
+        mtx_unlock_spin(&sched_lock);
 }
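
Note: sleepq_remove() above and sleepq_timeout() before it get the same reordering. The resume now happens while sched_lock (and the chain lock) are still held, instead of after both have been released, which closes the window in which the thread was off the queue but not yet runnable with no lock held. Distilled from the two hunks (sketch; the sleepq_remove() variant is shown):

        /* r141616: unlock first, resume last, unlocked window in between. */
        mtx_unlock_spin(&sched_lock);
        sleepq_remove_thread(sq, td);
        sleepq_release(wchan);
        sleepq_resume_thread(td, -1);

        /* r145056: resume while both locks are held, then unwind them. */
        sleepq_resume_thread(sq, td, -1);
        sleepq_release(wchan);
        mtx_unlock_spin(&sched_lock);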

 /*
  * Abort a thread as if an interrupt had occurred. Only abort
  * interruptible waits (unfortunately it isn't safe to abort others).
  *
  * XXX: What in the world does the comment below mean?
  * Also, whatever the signal code does...

--- 24 unchanged lines hidden ---