Side-by-side diff (deleted vs. added; full/compact view) of sys/kern/subr_sleepqueue.c
between FreeBSD revision 131473 (2004-07-02) and revision 134013 (2004-08-19).
subr_sleepqueue.c (131473) subr_sleepqueue.c (134013)
1/*
2 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 48 unchanged lines hidden (view full) ---

57 * pre-existing abuse of that API. The same lock must also be held when
58 * awakening threads, though that is currently only enforced for condition
59 * variables.
60 */
61
62#include "opt_sleepqueue_profiling.h"
63
64#include <sys/cdefs.h>
1/*
2 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 48 unchanged lines hidden (view full) ---

57 * pre-existing abuse of that API. The same lock must also be held when
58 * awakening threads, though that is currently only enforced for condition
59 * variables.
60 */
61
62#include "opt_sleepqueue_profiling.h"
63
64#include <sys/cdefs.h>
65__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 131473 2004-07-02 19:09:50Z jhb $");
65__FBSDID("$FreeBSD: head/sys/kern/subr_sleepqueue.c 134013 2004-08-19 11:31:42Z jhb $");
66
67#include <sys/param.h>
68#include <sys/systm.h>
69#include <sys/lock.h>
70#include <sys/kernel.h>
71#include <sys/ktr.h>
72#include <sys/malloc.h>
73#include <sys/mutex.h>

--- 34 unchanged lines hidden (view full) ---

108 * Locking key:
109 * c - sleep queue chain lock
110 */
111struct sleepqueue {
112 TAILQ_HEAD(, thread) sq_blocked; /* (c) Blocked threads. */
113 LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */
114 LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */
115 void *sq_wchan; /* (c) Wait channel. */
66
67#include <sys/param.h>
68#include <sys/systm.h>
69#include <sys/lock.h>
70#include <sys/kernel.h>
71#include <sys/ktr.h>
72#include <sys/malloc.h>
73#include <sys/mutex.h>

--- 34 unchanged lines hidden (view full) ---

108 * Locking key:
109 * c - sleep queue chain lock
110 */
111struct sleepqueue {
112 TAILQ_HEAD(, thread) sq_blocked; /* (c) Blocked threads. */
113 LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */
114 LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */
115 void *sq_wchan; /* (c) Wait channel. */
116 int sq_flags; /* (c) Flags. */
116 int sq_type; /* (c) Queue type. */
117#ifdef INVARIANTS
118 struct mtx *sq_lock; /* (c) Associated lock. */
119#endif
120};
121
122struct sleepqueue_chain {
123 LIST_HEAD(, sleepqueue) sc_queues; /* List of sleep queues. */
124 struct mtx sc_lock; /* Spin lock for this chain. */

--- 149 unchanged lines hidden (view full) ---

274 ("thread's sleep queue has a non-empty queue"));
275 KASSERT(LIST_EMPTY(&sq->sq_free),
276 ("thread's sleep queue has a non-empty free list"));
277 KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
278 sq->sq_wchan = wchan;
279#ifdef INVARIANTS
280 sq->sq_lock = lock;
281#endif
117#ifdef INVARIANTS
118 struct mtx *sq_lock; /* (c) Associated lock. */
119#endif
120};
121
122struct sleepqueue_chain {
123 LIST_HEAD(, sleepqueue) sc_queues; /* List of sleep queues. */
124 struct mtx sc_lock; /* Spin lock for this chain. */

--- 149 unchanged lines hidden (view full) ---

274 ("thread's sleep queue has a non-empty queue"));
275 KASSERT(LIST_EMPTY(&sq->sq_free),
276 ("thread's sleep queue has a non-empty free list"));
277 KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
278 sq->sq_wchan = wchan;
279#ifdef INVARIANTS
280 sq->sq_lock = lock;
281#endif
282 sq->sq_flags = flags;
282 sq->sq_type = flags & SLEEPQ_TYPE;
283 TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
284 } else {
285 MPASS(wchan == sq->sq_wchan);
286 MPASS(lock == sq->sq_lock);
287 TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq)
288 if (td1->td_priority > td->td_priority)
289 break;
290 if (td1 != NULL)
291 TAILQ_INSERT_BEFORE(td1, td, td_slpq);
292 else
293 TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
294 LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
295 }
296 td->td_sleepqueue = NULL;
297 mtx_lock_spin(&sched_lock);
298 td->td_wchan = wchan;
299 td->td_wmesg = wmesg;
283 TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
284 } else {
285 MPASS(wchan == sq->sq_wchan);
286 MPASS(lock == sq->sq_lock);
287 TAILQ_FOREACH(td1, &sq->sq_blocked, td_slpq)
288 if (td1->td_priority > td->td_priority)
289 break;
290 if (td1 != NULL)
291 TAILQ_INSERT_BEFORE(td1, td, td_slpq);
292 else
293 TAILQ_INSERT_TAIL(&sq->sq_blocked, td, td_slpq);
294 LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
295 }
296 td->td_sleepqueue = NULL;
297 mtx_lock_spin(&sched_lock);
298 td->td_wchan = wchan;
299 td->td_wmesg = wmesg;
300 if (flags & SLEEPQ_INTERRUPTIBLE)
301 td->td_flags |= TDF_SINTR;
300 mtx_unlock_spin(&sched_lock);
301}
302
303/*
304 * Sets a timeout that will remove the current thread from the specified
305 * sleep queue after timo ticks if the thread has not already been awakened.
306 */
307void

--- 32 unchanged lines hidden (view full) ---

340 sc = SC_LOOKUP(wchan);
341 mtx_assert(&sc->sc_lock, MA_OWNED);
342 MPASS(td->td_sleepqueue == NULL);
343 MPASS(wchan != NULL);
344 CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
345 (void *)td, (long)p->p_pid, p->p_comm);
346
347 /* Mark thread as being in an interruptible sleep. */
302 mtx_unlock_spin(&sched_lock);
303}
304
305/*
306 * Sets a timeout that will remove the current thread from the specified
307 * sleep queue after timo ticks if the thread has not already been awakened.
308 */
309void

--- 32 unchanged lines hidden (view full) ---

342 sc = SC_LOOKUP(wchan);
343 mtx_assert(&sc->sc_lock, MA_OWNED);
344 MPASS(td->td_sleepqueue == NULL);
345 MPASS(wchan != NULL);
346 CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
347 (void *)td, (long)p->p_pid, p->p_comm);
348
349 /* Mark thread as being in an interruptible sleep. */
348 mtx_lock_spin(&sched_lock);
350 MPASS(td->td_flags & TDF_SINTR);
349 MPASS(TD_ON_SLEEPQ(td));
351 MPASS(TD_ON_SLEEPQ(td));
350 td->td_flags |= TDF_SINTR;
351 mtx_unlock_spin(&sched_lock);
352 sleepq_release(wchan);
353
354 /* See if there are any pending signals for this thread. */
355 PROC_LOCK(p);
356 mtx_lock(&p->p_sigacts->ps_mtx);
357 sig = cursig(td);
358 mtx_unlock(&p->p_sigacts->ps_mtx);
359 if (sig == 0 && thread_suspend_check(1))
360 sig = SIGSTOP;
361 else
362 do_upcall = thread_upcall_check(td);
363 PROC_UNLOCK(p);
364
365 /*
366 * If there were pending signals and this thread is still on
352 sleepq_release(wchan);
353
354 /* See if there are any pending signals for this thread. */
355 PROC_LOCK(p);
356 mtx_lock(&p->p_sigacts->ps_mtx);
357 sig = cursig(td);
358 mtx_unlock(&p->p_sigacts->ps_mtx);
359 if (sig == 0 && thread_suspend_check(1))
360 sig = SIGSTOP;
361 else
362 do_upcall = thread_upcall_check(td);
363 PROC_UNLOCK(p);
364
365 /*
366 * If there were pending signals and this thread is still on
367 * the sleep queue, remove it from the sleep queue.
367 * the sleep queue, remove it from the sleep queue. If the
368 * thread was removed from the sleep queue while we were blocked
369 * above, then clear TDF_SINTR before returning.
368 */
369 sq = sleepq_lookup(wchan);
370 mtx_lock_spin(&sched_lock);
371 if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
372 mtx_unlock_spin(&sched_lock);
373 sleepq_remove_thread(sq, td);
370 */
371 sq = sleepq_lookup(wchan);
372 mtx_lock_spin(&sched_lock);
373 if (TD_ON_SLEEPQ(td) && (sig != 0 || do_upcall != 0)) {
374 mtx_unlock_spin(&sched_lock);
375 sleepq_remove_thread(sq, td);
374 } else
376 } else {
377 if (!TD_ON_SLEEPQ(td) && sig == 0)
378 td->td_flags &= ~TDF_SINTR;
375 mtx_unlock_spin(&sched_lock);
379 mtx_unlock_spin(&sched_lock);
380 }
376 return (sig);
377}
378
379/*
380 * Switches to another thread if we are still asleep on a sleep queue and
381 * drop the lock on the sleepqueue chain. Returns with sched_lock held.
382 */
383static void

--- 76 unchanged lines hidden (view full) ---

460static int
461sleepq_check_signals(void)
462{
463 struct thread *td;
464
465 mtx_assert(&sched_lock, MA_OWNED);
466 td = curthread;
467
381 return (sig);
382}
383
384/*
385 * Switches to another thread if we are still asleep on a sleep queue and
386 * drop the lock on the sleepqueue chain. Returns with sched_lock held.
387 */
388static void

--- 76 unchanged lines hidden (view full) ---

465static int
466sleepq_check_signals(void)
467{
468 struct thread *td;
469
470 mtx_assert(&sched_lock, MA_OWNED);
471 td = curthread;
472
473 /*
474 * If TDF_SINTR is clear, then we were awakened while executing
475 * sleepq_catch_signals().
476 */
477 if (!(td->td_flags & TDF_SINTR))
478 return (0);
479
468 /* We are no longer in an interruptible sleep. */
469 td->td_flags &= ~TDF_SINTR;
470
471 if (td->td_flags & TDF_INTERRUPT)
472 return (td->td_intrval);
473 return (0);
474}
475

--- 32 unchanged lines hidden (view full) ---

508
509/*
510 * Block the current thread until it is awakened from its sleep queue.
511 */
512void
513sleepq_wait(void *wchan)
514{
515
480 /* We are no longer in an interruptible sleep. */
481 td->td_flags &= ~TDF_SINTR;
482
483 if (td->td_flags & TDF_INTERRUPT)
484 return (td->td_intrval);
485 return (0);
486}
487

--- 32 unchanged lines hidden (view full) ---

520
521/*
522 * Block the current thread until it is awakened from its sleep queue.
523 */
524void
525sleepq_wait(void *wchan)
526{
527
528 MPASS(!(curthread->td_flags & TDF_SINTR));
516 sleepq_switch(wchan);
517 mtx_unlock_spin(&sched_lock);
518}
519
520/*
521 * Block the current thread until it is awakened from its sleep queue
522 * or it is interrupted by a signal.
523 */

--- 12 unchanged lines hidden (view full) ---

536 * Block the current thread until it is awakened from its sleep queue
537 * or it times out while waiting.
538 */
539int
540sleepq_timedwait(void *wchan)
541{
542 int rval;
543
529 sleepq_switch(wchan);
530 mtx_unlock_spin(&sched_lock);
531}
532
533/*
534 * Block the current thread until it is awakened from its sleep queue
535 * or it is interrupted by a signal.
536 */

--- 12 unchanged lines hidden (view full) ---

549 * Block the current thread until it is awakened from its sleep queue
550 * or it times out while waiting.
551 */
552int
553sleepq_timedwait(void *wchan)
554{
555 int rval;
556
557 MPASS(!(curthread->td_flags & TDF_SINTR));
544 sleepq_switch(wchan);
545 rval = sleepq_check_timeout();
546 mtx_unlock_spin(&sched_lock);
547 return (rval);
548}
549
550/*
551 * Block the current thread until it is awakened from its sleep queue,

--- 92 unchanged lines hidden (view full) ---

644
645 CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
646 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
647 sq = sleepq_lookup(wchan);
648 if (sq == NULL) {
649 sleepq_release(wchan);
650 return;
651 }
558 sleepq_switch(wchan);
559 rval = sleepq_check_timeout();
560 mtx_unlock_spin(&sched_lock);
561 return (rval);
562}
563
564/*
565 * Block the current thread until it is awakened from its sleep queue,

--- 92 unchanged lines hidden (view full) ---

658
659 CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
660 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
661 sq = sleepq_lookup(wchan);
662 if (sq == NULL) {
663 sleepq_release(wchan);
664 return;
665 }
652 KASSERT(sq->sq_flags == flags,
666 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
653 ("%s: mismatch between sleep/wakeup and cv_*", __func__));
654 /* XXX: Do for all sleep queues eventually. */
655 if (flags & SLEEPQ_CONDVAR)
656 mtx_assert(sq->sq_lock, MA_OWNED);
657
658 /* Remove first thread from queue and awaken it. */
659 td = TAILQ_FIRST(&sq->sq_blocked);
660 sleepq_remove_thread(sq, td);

--- 13 unchanged lines hidden (view full) ---

674
675 CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
676 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
677 sq = sleepq_lookup(wchan);
678 if (sq == NULL) {
679 sleepq_release(wchan);
680 return;
681 }
667 ("%s: mismatch between sleep/wakeup and cv_*", __func__));
668 /* XXX: Do for all sleep queues eventually. */
669 if (flags & SLEEPQ_CONDVAR)
670 mtx_assert(sq->sq_lock, MA_OWNED);
671
672 /* Remove first thread from queue and awaken it. */
673 td = TAILQ_FIRST(&sq->sq_blocked);
674 sleepq_remove_thread(sq, td);

--- 13 unchanged lines hidden (view full) ---

688
689 CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
690 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
691 sq = sleepq_lookup(wchan);
692 if (sq == NULL) {
693 sleepq_release(wchan);
694 return;
695 }
682 KASSERT(sq->sq_flags == flags,
696 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
683 ("%s: mismatch between sleep/wakeup and cv_*", __func__));
684 /* XXX: Do for all sleep queues eventually. */
685 if (flags & SLEEPQ_CONDVAR)
686 mtx_assert(sq->sq_lock, MA_OWNED);
687
688 /* Move blocked threads from the sleep queue to a temporary list. */
689 TAILQ_INIT(&list);
690 while (!TAILQ_EMPTY(&sq->sq_blocked)) {

--- 146 unchanged lines hidden ---
697 ("%s: mismatch between sleep/wakeup and cv_*", __func__));
698 /* XXX: Do for all sleep queues eventually. */
699 if (flags & SLEEPQ_CONDVAR)
700 mtx_assert(sq->sq_lock, MA_OWNED);
701
702 /* Move blocked threads from the sleep queue to a temporary list. */
703 TAILQ_INIT(&list);
704 while (!TAILQ_EMPTY(&sq->sq_blocked)) {

--- 146 unchanged lines hidden ---