Deleted Added
full compact
subr_sleepqueue.c (315027) subr_sleepqueue.c (316120)
1/*-
2 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 43 unchanged lines hidden (view full) ---

52 * must consistently use the same lock to synchronize with a wait channel,
53 * though this check is currently only a warning for sleep/wakeup due to
54 * pre-existing abuse of that API. The same lock must also be held when
55 * awakening threads, though that is currently only enforced for condition
56 * variables.
57 */
58
59#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 43 unchanged lines hidden (view full) ---

52 * must consistently use the same lock to synchronize with a wait channel,
53 * though this check is currently only a warning for sleep/wakeup due to
54 * pre-existing abuse of that API. The same lock must also be held when
55 * awakening threads, though that is currently only enforced for condition
56 * variables.
57 */
58
59#include <sys/cdefs.h>
60__FBSDID("$FreeBSD: stable/11/sys/kern/subr_sleepqueue.c 315027 2017-03-10 20:30:17Z vangyzen $");
60__FBSDID("$FreeBSD: stable/11/sys/kern/subr_sleepqueue.c 316120 2017-03-29 01:21:48Z vangyzen $");
61
62#include "opt_sleepqueue_profiling.h"
63#include "opt_ddb.h"
64#include "opt_sched.h"
65#include "opt_stack.h"
66
67#include <sys/param.h>
68#include <sys/systm.h>

--- 4 unchanged lines hidden (view full) ---

73#include <sys/proc.h>
74#include <sys/sbuf.h>
75#include <sys/sched.h>
76#include <sys/sdt.h>
77#include <sys/signalvar.h>
78#include <sys/sleepqueue.h>
79#include <sys/stack.h>
80#include <sys/sysctl.h>
61
62#include "opt_sleepqueue_profiling.h"
63#include "opt_ddb.h"
64#include "opt_sched.h"
65#include "opt_stack.h"
66
67#include <sys/param.h>
68#include <sys/systm.h>

--- 4 unchanged lines hidden (view full) ---

73#include <sys/proc.h>
74#include <sys/sbuf.h>
75#include <sys/sched.h>
76#include <sys/sdt.h>
77#include <sys/signalvar.h>
78#include <sys/sleepqueue.h>
79#include <sys/stack.h>
80#include <sys/sysctl.h>
81#include <sys/time.h>
81
82
83#include <machine/atomic.h>
84
82#include <vm/uma.h>
83
84#ifdef DDB
85#include <ddb/ddb.h>
86#endif
87
88
89/*

--- 444 unchanged lines hidden (view full) ---

534 * Returns with thread lock.
535 */
536static void
537sleepq_switch(void *wchan, int pri)
538{
539 struct sleepqueue_chain *sc;
540 struct sleepqueue *sq;
541 struct thread *td;
85#include <vm/uma.h>
86
87#ifdef DDB
88#include <ddb/ddb.h>
89#endif
90
91
92/*

--- 444 unchanged lines hidden (view full) ---

537 * Returns with thread lock.
538 */
539static void
540sleepq_switch(void *wchan, int pri)
541{
542 struct sleepqueue_chain *sc;
543 struct sleepqueue *sq;
544 struct thread *td;
545 bool rtc_changed;
542
543 td = curthread;
544 sc = SC_LOOKUP(wchan);
545 mtx_assert(&sc->sc_lock, MA_OWNED);
546 THREAD_LOCK_ASSERT(td, MA_OWNED);
547
548 /*
549 * If we have a sleep queue, then we've already been woken up, so
550 * just return.
551 */
552 if (td->td_sleepqueue != NULL) {
553 mtx_unlock_spin(&sc->sc_lock);
554 return;
555 }
556
557 /*
558 * If TDF_TIMEOUT is set, then our sleep has been timed out
559 * already but we are still on the sleep queue, so dequeue the
560 * thread and return.
546
547 td = curthread;
548 sc = SC_LOOKUP(wchan);
549 mtx_assert(&sc->sc_lock, MA_OWNED);
550 THREAD_LOCK_ASSERT(td, MA_OWNED);
551
552 /*
553 * If we have a sleep queue, then we've already been woken up, so
554 * just return.
555 */
556 if (td->td_sleepqueue != NULL) {
557 mtx_unlock_spin(&sc->sc_lock);
558 return;
559 }
560
561 /*
562 * If TDF_TIMEOUT is set, then our sleep has been timed out
563 * already but we are still on the sleep queue, so dequeue the
564 * thread and return.
565 *
566 * Do the same if the real-time clock has been adjusted since this
567 * thread calculated its timeout based on that clock. This handles
568 * the following race:
569 * - The Ts thread needs to sleep until an absolute real-clock time.
570 * It copies the global rtc_generation into curthread->td_rtcgen,
571 * reads the RTC, and calculates a sleep duration based on that time.
572 * See umtxq_sleep() for an example.
573 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
574 * threads that are sleeping until an absolute real-clock time.
575 * See tc_setclock() and the POSIX specification of clock_settime().
576 * - Ts reaches the code below. It holds the sleepqueue chain lock,
577 * so Tc has finished waking, so this thread must test td_rtcgen.
578 * (The declaration of td_rtcgen refers to this comment.)
561 */
579 */
562 if (td->td_flags & TDF_TIMEOUT) {
580 rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
581 if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
582 if (rtc_changed) {
583 td->td_rtcgen = 0;
584 }
563 MPASS(TD_ON_SLEEPQ(td));
564 sq = sleepq_lookup(wchan);
565 if (sleepq_resume_thread(sq, td, 0)) {
566#ifdef INVARIANTS
567 /*
568 * This thread hasn't gone to sleep yet, so it
569 * should not be swapped out.
570 */

--- 310 unchanged lines hidden (view full) ---

881 }
882 MPASS(besttd != NULL);
883 thread_lock(besttd);
884 wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
885 thread_unlock(besttd);
886 return (wakeup_swapper);
887}
888
585 MPASS(TD_ON_SLEEPQ(td));
586 sq = sleepq_lookup(wchan);
587 if (sleepq_resume_thread(sq, td, 0)) {
588#ifdef INVARIANTS
589 /*
590 * This thread hasn't gone to sleep yet, so it
591 * should not be swapped out.
592 */

--- 310 unchanged lines hidden (view full) ---

903 }
904 MPASS(besttd != NULL);
905 thread_lock(besttd);
906 wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
907 thread_unlock(besttd);
908 return (wakeup_swapper);
909}
910
/*
 * Predicate for sleepq_remove_matching() that matches every thread.
 * Used by sleepq_broadcast() to resume all sleepers on a queue.
 */
static bool
match_any(struct thread *td __unused)
{

	return (true);
}
917
889/*
890 * Resume all threads sleeping on a specified wait channel.
891 */
892int
893sleepq_broadcast(void *wchan, int flags, int pri, int queue)
894{
895 struct sleepqueue *sq;
918/*
919 * Resume all threads sleeping on a specified wait channel.
920 */
921int
922sleepq_broadcast(void *wchan, int flags, int pri, int queue)
923{
924 struct sleepqueue *sq;
896 struct thread *td, *tdn;
897 int wakeup_swapper;
898
899 CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
900 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
901 MPASS((queue >= 0) && (queue < NR_SLEEPQS));
902 sq = sleepq_lookup(wchan);
903 if (sq == NULL)
904 return (0);
905 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
906 ("%s: mismatch between sleep/wakeup and cv_*", __func__));
907
925
926 CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
927 KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
928 MPASS((queue >= 0) && (queue < NR_SLEEPQS));
929 sq = sleepq_lookup(wchan);
930 if (sq == NULL)
931 return (0);
932 KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
933 ("%s: mismatch between sleep/wakeup and cv_*", __func__));
934
935 return (sleepq_remove_matching(sq, queue, match_any, pri));
936}
937
938/*
939 * Resume threads on the sleep queue that match the given predicate.
940 */
941int
942sleepq_remove_matching(struct sleepqueue *sq, int queue,
943 bool (*matches)(struct thread *), int pri)
944{
945 struct thread *td, *tdn;
946 int wakeup_swapper;
947
908 /*
948 /*
909 * Resume all blocked threads on the sleep queue. The last thread will
910 * be given ownership of sq and may re-enqueue itself before
911 * sleepq_resume_thread() returns, so we must cache the "next" queue
912 * item at the beginning of the final iteration.
949 * The last thread will be given ownership of sq and may
950 * re-enqueue itself before sleepq_resume_thread() returns,
951 * so we must cache the "next" queue item at the beginning
952 * of the final iteration.
913 */
914 wakeup_swapper = 0;
915 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
916 thread_lock(td);
953 */
954 wakeup_swapper = 0;
955 TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
956 thread_lock(td);
917 wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
957 if (matches(td))
958 wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
918 thread_unlock(td);
919 }
959 thread_unlock(td);
960 }
961
920 return (wakeup_swapper);
921}
922
923/*
924 * Time sleeping threads out. When the timeout expires, the thread is
925 * removed from the sleep queue and made runnable if it is still asleep.
926 */
927static void

--- 119 unchanged lines hidden (view full) ---

1047 MPASS(wchan != NULL);
1048 sq = sleepq_lookup(wchan);
1049 MPASS(sq != NULL);
1050
1051 /* Thread is asleep on sleep queue sq, so wake it up. */
1052 return (sleepq_resume_thread(sq, td, 0));
1053}
1054
962 return (wakeup_swapper);
963}
964
965/*
966 * Time sleeping threads out. When the timeout expires, the thread is
967 * removed from the sleep queue and made runnable if it is still asleep.
968 */
969static void

--- 119 unchanged lines hidden (view full) ---

1089 MPASS(wchan != NULL);
1090 sq = sleepq_lookup(wchan);
1091 MPASS(sq != NULL);
1092
1093 /* Thread is asleep on sleep queue sq, so wake it up. */
1094 return (sleepq_resume_thread(sq, td, 0));
1095}
1096
1097void
1098sleepq_chains_remove_matching(bool (*matches)(struct thread *))
1099{
1100 struct sleepqueue_chain *sc;
1101 struct sleepqueue *sq;
1102 int i, wakeup_swapper;
1103
1104 wakeup_swapper = 0;
1105 for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
1106 if (LIST_EMPTY(&sc->sc_queues)) {
1107 continue;
1108 }
1109 mtx_lock_spin(&sc->sc_lock);
1110 LIST_FOREACH(sq, &sc->sc_queues, sq_hash) {
1111 for (i = 0; i < NR_SLEEPQS; ++i) {
1112 wakeup_swapper |= sleepq_remove_matching(sq, i,
1113 matches, 0);
1114 }
1115 }
1116 mtx_unlock_spin(&sc->sc_lock);
1117 }
1118 if (wakeup_swapper) {
1119 kick_proc0();
1120 }
1121}
1122
1055/*
1056 * Prints the stacks of all threads presently sleeping on wchan/queue to
1057 * the sbuf sb. Sets count_stacks_printed to the number of stacks actually
1058 * printed. Typically, this will equal the number of threads sleeping on the
1059 * queue, but may be less if sb overflowed before all stacks were printed.
1060 */
1061#ifdef STACK
1062int

--- 320 unchanged lines hidden ---
1123/*
1124 * Prints the stacks of all threads presently sleeping on wchan/queue to
1125 * the sbuf sb. Sets count_stacks_printed to the number of stacks actually
1126 * printed. Typically, this will equal the number of threads sleeping on the
1127 * queue, but may be less if sb overflowed before all stacks were printed.
1128 */
1129#ifdef STACK
1130int

--- 320 unchanged lines hidden ---