1b2441318SGreg Kroah-Hartman/* SPDX-License-Identifier: GPL-2.0 */
21da177e4SLinus Torvalds#ifndef _LINUX_WAIT_H
31da177e4SLinus Torvalds#define _LINUX_WAIT_H
4fb869b6eSIngo Molnar/*
5fb869b6eSIngo Molnar * Linux wait queue related types and methods
6fb869b6eSIngo Molnar */
71da177e4SLinus Torvalds#include <linux/list.h>
81da177e4SLinus Torvalds#include <linux/stddef.h>
91da177e4SLinus Torvalds#include <linux/spinlock.h>
105b825c3aSIngo Molnar
111da177e4SLinus Torvalds#include <asm/current.h>
12607ca46eSDavid Howells#include <uapi/linux/wait.h>
131da177e4SLinus Torvalds
14ac6424b9SIngo Molnartypedef struct wait_queue_entry wait_queue_entry_t;
1550816c48SIngo Molnar
1650816c48SIngo Molnartypedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
1750816c48SIngo Molnarint default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
181da177e4SLinus Torvalds
19ac6424b9SIngo Molnar/* wait_queue_entry::flags */
2061ada528SPeter Zijlstra#define WQ_FLAG_EXCLUSIVE	0x01
2161ada528SPeter Zijlstra#define WQ_FLAG_WOKEN		0x02
222554db91STim Chen#define WQ_FLAG_BOOKMARK	0x04
237f26482aSPeter Zijlstra#define WQ_FLAG_CUSTOM		0x08
245ef64cc8SLinus Torvalds#define WQ_FLAG_DONE		0x10
25c4d51a52SDavid Woodhouse#define WQ_FLAG_PRIORITY	0x20
2661ada528SPeter Zijlstra
27ac6424b9SIngo Molnar/*
28ac6424b9SIngo Molnar * A single wait-queue entry structure:
29ac6424b9SIngo Molnar */
30ac6424b9SIngo Molnarstruct wait_queue_entry {
31fb869b6eSIngo Molnar	unsigned int		flags;
32fb869b6eSIngo Molnar	void			*private;
33fb869b6eSIngo Molnar	wait_queue_func_t	func;
342055da97SIngo Molnar	struct list_head	entry;
351da177e4SLinus Torvalds};
361da177e4SLinus Torvalds
379d9d676fSIngo Molnarstruct wait_queue_head {
38fb869b6eSIngo Molnar	spinlock_t		lock;
392055da97SIngo Molnar	struct list_head	head;
401da177e4SLinus Torvalds};
419d9d676fSIngo Molnartypedef struct wait_queue_head wait_queue_head_t;
421da177e4SLinus Torvalds
438c65b4a6STim Schmielaustruct task_struct;
441da177e4SLinus Torvalds
/*
 * Macros for declaration and initialisation of the datatypes
 */
481da177e4SLinus Torvalds
/* Static initializer for a wait_queue_entry that wakes @tsk via default_wake_function. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

/* Define and initialize a wait_queue_entry @name bound to task @tsk. */
#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait_queue_head: unlocked lock, empty list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= LIST_HEAD_INIT(name.head) }

/* Define and statically initialize a wait_queue_head @name. */
#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
631da177e4SLinus Torvalds
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Initialize a wait_queue_head at runtime. Each expansion supplies its own
 * static lock_class_key and the stringified head name, giving lockdep a
 * distinct lock class per call site.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)
721da177e4SLinus Torvalds
#ifdef CONFIG_LOCKDEP
/*
 * With lockdep enabled, on-stack heads are initialized through
 * init_waitqueue_head() so each instance gets a proper lock class;
 * without lockdep the plain static initializer suffices.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
817259f0d0SPeter Zijlstra
8250816c48SIngo Molnarstatic inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
831da177e4SLinus Torvalds{
8450816c48SIngo Molnar	wq_entry->flags		= 0;
8550816c48SIngo Molnar	wq_entry->private	= p;
8650816c48SIngo Molnar	wq_entry->func		= default_wake_function;
871da177e4SLinus Torvalds}
881da177e4SLinus Torvalds
89fb869b6eSIngo Molnarstatic inline void
9050816c48SIngo Molnarinit_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
911da177e4SLinus Torvalds{
9250816c48SIngo Molnar	wq_entry->flags		= 0;
9350816c48SIngo Molnar	wq_entry->private	= NULL;
9450816c48SIngo Molnar	wq_entry->func		= func;
951da177e4SLinus Torvalds}
961da177e4SLinus Torvalds
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty.
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))         if (@cond)
 *        wake_up(wq_head);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}
1311da177e4SLinus Torvalds
/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}
144a6d81d30SJosef Bacik
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes.
 *
 * Unlike a bare waitqueue_active(), this issues a full memory barrier first,
 * ordering the caller's prior stores against the lockless emptiness check.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
1651ce0bf50SHerbert Xu
1669d9d676fSIngo Molnarextern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1679d9d676fSIngo Molnarextern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
168c4d51a52SDavid Woodhouseextern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1699d9d676fSIngo Molnarextern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1701da177e4SLinus Torvalds
1719d9d676fSIngo Molnarstatic inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
1721da177e4SLinus Torvalds{
173c4d51a52SDavid Woodhouse	struct list_head *head = &wq_head->head;
174c4d51a52SDavid Woodhouse	struct wait_queue_entry *wq;
175c4d51a52SDavid Woodhouse
176c4d51a52SDavid Woodhouse	list_for_each_entry(wq, &wq_head->head, entry) {
177c4d51a52SDavid Woodhouse		if (!(wq->flags & WQ_FLAG_PRIORITY))
178c4d51a52SDavid Woodhouse			break;
179c4d51a52SDavid Woodhouse		head = &wq->entry;
180c4d51a52SDavid Woodhouse	}
181c4d51a52SDavid Woodhouse	list_add(&wq_entry->entry, head);
1821da177e4SLinus Torvalds}
1831da177e4SLinus Torvalds
1841da177e4SLinus Torvalds/*
1851da177e4SLinus Torvalds * Used for wake-one threads:
1861da177e4SLinus Torvalds */
187fb869b6eSIngo Molnarstatic inline void
1889d9d676fSIngo Molnar__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
189a93d2f17SChangli Gao{
19050816c48SIngo Molnar	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
1919d9d676fSIngo Molnar	__add_wait_queue(wq_head, wq_entry);
192a93d2f17SChangli Gao}
193a93d2f17SChangli Gao
/*
 * Queue @wq_entry at the tail of @wq_head. Lockless helper; callers are
 * expected to serialize themselves (typically via wq_head->lock) — NOTE:
 * locking contract inferred from the __-prefixed naming, confirm at callers.
 */
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}
1981da177e4SLinus Torvalds
199fb869b6eSIngo Molnarstatic inline void
2009d9d676fSIngo Molnar__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
201a93d2f17SChangli Gao{
20250816c48SIngo Molnar	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
2039d9d676fSIngo Molnar	__add_wait_queue_entry_tail(wq_head, wq_entry);
204a93d2f17SChangli Gao}
205a93d2f17SChangli Gao
206fb869b6eSIngo Molnarstatic inline void
2079d9d676fSIngo Molnar__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
2081da177e4SLinus Torvalds{
2092055da97SIngo Molnar	list_del(&wq_entry->entry);
2101da177e4SLinus Torvalds}
2111da177e4SLinus Torvalds
2129d9d676fSIngo Molnarvoid __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
2139d9d676fSIngo Molnarvoid __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
21411a19c7bSTim Chenvoid __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
21511a19c7bSTim Chen		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
216ce4dd442SDavid Howellsvoid __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
217f94df989SDavid Howellsvoid __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
2189d9d676fSIngo Molnarvoid __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
219ce4dd442SDavid Howellsvoid __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
22042288cb4SEric Biggersvoid __wake_up_pollfree(struct wait_queue_head *wq_head);
2211da177e4SLinus Torvalds
/* Wake waiters sleeping in TASK_NORMAL state (1, @nr, or all of them). */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
/* _locked variants: presumably the waitqueue lock is already held — verify at callers. */
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

/* Wake only TASK_INTERRUPTIBLE waiters. */
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
2321da177e4SLinus Torvalds
/*
 * Wakeup macros to be used to report events to the targets.
 */

/* Pack/unpack a poll event mask into/from the opaque wakeup key pointer. */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))

/* Wakeups that pass a poll event mask as the key. */
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
2480ccf831cSPeter Zijlstra
/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * The queue lock is not always taken here for performance reasons, so
	 * this lockless check can race with someone removing the last entry
	 * while still holding the lock. Such racers are required to hold
	 * rcu_read_lock(), which keeps the RCU-delayed free safe regardless.
	 */
	if (!waitqueue_active(wq_head))
		return;

	__wake_up_pollfree(wq_head);
}
27342288cb4SEric Biggers
/*
 * Condition wrapper for the timeout-flavoured wait loops. Evaluates to true
 * when waiting should stop: either @condition became true or the timeout in
 * the enclosing macro's __ret variable ran down to zero. When the condition
 * turns true exactly as __ret hits zero, __ret is forced to 1 so callers can
 * still distinguish "condition met" from "timed out".
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})
2812953ef24SPeter Zijlstra
/*
 * True when @state can be interrupted by signals (TASK_INTERRUPTIBLE or
 * TASK_KILLABLE). A non-constant @state conservatively yields true, keeping
 * the signal check in ___wait_event() alive.
 */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)		\
28650816c48SIngo Molnarextern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
2870176beafSOleg Nesterov
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * @wq_head:   the waitqueue to wait on
 * @condition: expression re-checked after each wakeup; the loop exits when true
 * @state:     task state to sleep in (e.g. TASK_UNINTERRUPTIBLE)
 * @exclusive: non-zero queues the entry with WQ_FLAG_EXCLUSIVE
 * @ret:       initial value for __ret (e.g. a remaining timeout)
 * @cmd:       statement executed to actually sleep, typically schedule()
 *
 * A pending-signal indication (non-zero return from prepare_to_wait_event())
 * aborts interruptible waits early with that value in __ret.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
32341a1431bSPeter Zijlstra
/* Uninterruptible wait with no return value, hence the (void) cast. */
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())
3271da177e4SLinus Torvalds
/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * @condition is tested before anything is queued, so an already-true
 * condition never sets up a wait entry; might_sleep() annotates that
 * this call may sleep.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
3471da177e4SLinus Torvalds
/* Uninterruptible wait sleeping via io_schedule() instead of schedule(). */
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 * (see io_schedule() for how such sleeps are treated).
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
3622c561246SPeter Zijlstra
/* Interruptible wait sleeping via freezable_schedule(). */
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * Sleeps through freezable_schedule() (see that function for the exact
 * freezer interaction). Returns 0 on success or the error from an
 * interrupted wait.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
38736df04bcSPeter Zijlstra
3884b1c480bSIngo Molnar#define __wait_event_timeout(wq_head, condition, timeout)			\
3894b1c480bSIngo Molnar	___wait_event(