/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/wait.h 335425 2018-06-20 06:49:26Z hselasky $
 */

#ifndef _LINUX_WAIT_H_
#define	_LINUX_WAIT_H_

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>

#define	SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)

#define	might_sleep()							\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")

#define	might_sleep_if(cond) do { \
	if (cond) { might_sleep(); } \
} while (0)

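/*
 * SKIP_SLEEP() is true when the scheduler is stopped (e.g. during a panic)
 * or the kernel debugger is active; sleep paths can test it to avoid
 * blocking in those contexts.  might_sleep() expands to a WITNESS check and
 * therefore has no effect in kernels built without WITNESS.
 */
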
struct wait_queue;
struct wait_queue_head;

typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_head wait_queue_head_t;

typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);

/*
 * Many API consumers directly reference these fields and those of
 * wait_queue_head.
 */
struct wait_queue {
	unsigned int flags;	/* always 0 */
	void *private;
	wait_queue_func_t *func;
	union {
		struct list_head task_list; /* < v4.13 */
		struct list_head entry; /* >= v4.13 */
	};
};

struct wait_queue_head {
	spinlock_t lock;
	union {
		struct list_head task_list; /* < v4.13 */
		struct list_head head; /* >= v4.13 */
	};
};
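
/*
 * The anonymous unions let code written against either the pre-4.13 Linux
 * field names (task_list) or the 4.13+ names (entry and head) compile
 * unchanged; both members alias the same list head.
 */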

/*
 * autoremove_wake_function is referenced by at least one DRM driver, so it
 * may not be renamed and furthermore must be the default wait queue
 * callback.
 */
extern wait_queue_func_t autoremove_wake_function;
extern wait_queue_func_t default_wake_function;

#define	DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private = current,					\
		.func = function,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

#define	DEFINE_WAIT(name) \
	DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define	DECLARE_WAITQUEUE(name, task)					\
	wait_queue_t name = {						\
		.private = task,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

#define	DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = {					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list),	\
	};								\
	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)

#define	init_waitqueue_head(wqh) do {					\
	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),		\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);			\
	INIT_LIST_HEAD(&(wqh)->task_list);				\
} while (0)
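
/*
 * Illustrative sketch (the names foo_wq, foo_softc and sc are hypothetical):
 * a wait queue head is either defined statically, in which case MTX_SYSINIT
 * initializes its lock at boot, or embedded in a structure and initialized
 * at run time:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *
 *	struct foo_softc {
 *		wait_queue_head_t wq;
 *	};
 *	...
 *	init_waitqueue_head(&sc->wq);
 *
 * DEFINE_WAIT() declares an on-stack wait entry for the current thread with
 * autoremove_wake_function as its wakeup callback.
 */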

void linux_init_wait_entry(wait_queue_t *, int);
void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);

#define	init_wait_entry(wq, flags)					\
	linux_init_wait_entry(wq, flags)
#define	wake_up(wqh)							\
	linux_wake_up(wqh, TASK_NORMAL, 1, false)
#define	wake_up_all(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, false)
#define	wake_up_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 1, true)
#define	wake_up_all_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, true)
#define	wake_up_interruptible(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
#define	wake_up_interruptible_all(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)
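
/*
 * The linux_wake_up() arguments above are, in order: the wait queue head,
 * the task state mask a sleeper must match (TASK_NORMAL covers both
 * interruptible and uninterruptible sleeps), the number of sleepers to wake
 * (0 meaning all of them), and whether the caller already holds the wait
 * queue lock (the *_locked variants).
 */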

int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
    unsigned int, spinlock_t *);

/*
 * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
 * cond is true after timeout, remaining jiffies (> 0) if cond is true before
 * timeout.
 */
#define	__wait_event_common(wqh, cond, timeout, state, lock) ({	\
	DEFINE_WAIT(__wq);					\
	const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout);	\
	int __start = ticks;					\
	int __ret = 0;						\
								\
	for (;;) {						\
		linux_prepare_to_wait(&(wqh), &__wq, state);	\
		if (cond)					\
			break;					\
		__ret = linux_wait_event_common(&(wqh), &__wq,	\
		    __timeout, state, lock);			\
		if (__ret != 0)					\
			break;					\
	}							\
	linux_finish_wait(&(wqh), &__wq);			\
	if (__timeout != MAX_SCHEDULE_TIMEOUT) {		\
		if (__ret == -EWOULDBLOCK)			\
			__ret = !!(cond);			\
		else if (__ret != -ERESTARTSYS) {		\
			__ret = __timeout + __start - ticks;	\
			/* range check return value */		\
			if (__ret < 1)				\
				__ret = 1;			\
			else if (__ret > __timeout)		\
				__ret = __timeout;		\
		}						\
	}							\
	__ret;							\
})
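
/*
 * The loop above follows the usual prepare_to_wait()/finish_wait() ordering:
 * the wait entry is queued and the task state is set before the condition is
 * (re)tested, so a wakeup racing with the test is not lost, and
 * linux_wait_event_common() is only called after the condition was observed
 * false.
 */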

#define	wait_event(wqh, cond) do {					\
	(void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, NULL);				\
} while (0)

#define	wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

#define	wait_event_killable(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible_timeout(wqh, cond, timeout) ({	\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})
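
/*
 * Illustrative usage (sc, sc->wq and sc->done are hypothetical):
 *
 *	// waiting side: returns 0 once sc->done is set, or -ERESTARTSYS if
 *	// the sleep was interrupted by a signal
 *	error = wait_event_interruptible(sc->wq, sc->done != 0);
 *
 *	// waking side
 *	sc->done = 1;
 *	wake_up(&sc->wq);
 *
 * Note that wait_event_killable() is currently implemented identically to
 * wait_event_interruptible(); both sleep in TASK_INTERRUPTIBLE state.
 */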

/*
 * Wait queue is already locked.
 */
#define	wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define	wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})

/*
 * The passed spinlock is held when testing the condition.
 */
#define	wait_event_lock_irq(wqh, cond, lock) ({			\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, &(lock));			\
})
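
/*
 * Illustrative use of the *_lock_irq variants (names hypothetical): the
 * caller enters with the spinlock held, the condition is evaluated with it
 * held, and the lock is expected to be dropped while the thread actually
 * sleeps:
 *
 *	spin_lock_irq(&sc->lock);
 *	wait_event_lock_irq(sc->wq, sc->ready != 0, sc->lock);
 *	// sc->lock is held again here
 *	spin_unlock_irq(&sc->lock);
 */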

static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add(&wq->task_list, &wqh->task_list);
}

static inline void
add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__add_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

static inline void
__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add_tail(&wq->task_list, &wqh->task_list);
}

static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_del(&wq->task_list);
}

static inline void
remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__remove_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

bool linux_waitqueue_active(wait_queue_head_t *);

#define	waitqueue_active(wqh)		linux_waitqueue_active(wqh)

void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);

#define	prepare_to_wait(wqh, wq, state)	linux_prepare_to_wait(wqh, wq, state)
#define	finish_wait(wqh, wq)		linux_finish_wait(wqh, wq)
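
/*
 * These hooks support the classic open-coded Linux wait loop, sketched here
 * with hypothetical names:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&sc->wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (sc->done)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&sc->wq, &wait);
 */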

void linux_wake_up_bit(void *, int);
int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
void linux_wake_up_atomic_t(atomic_t *);
int linux_wait_on_atomic_t(atomic_t *, unsigned int);

#define	wake_up_bit(word, bit)		linux_wake_up_bit(word, bit)
#define	wait_on_bit(word, bit, state)					\
	linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
#define	wait_on_bit_timeout(word, bit, state, timeout)			\
	linux_wait_on_bit_timeout(word, bit, state, timeout)
#define	wake_up_atomic_t(a)		linux_wake_up_atomic_t(a)
/*
 * All existing callers have a cb that just schedule()s. To avoid adding
 * complexity, just emulate that internally. The prototype is different so that
 * callers must be manually modified; a cb that does something other than call
 * schedule() will require special treatment.
 */
#define	wait_on_atomic_t(a, state)	linux_wait_on_atomic_t(a, state)
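
/*
 * Bit-wait sketch (FOO_BUSY and sc->flags are hypothetical): one thread
 * clears the bit and issues the wakeup, the other sleeps until the bit is
 * clear:
 *
 *	clear_bit(FOO_BUSY, &sc->flags);
 *	wake_up_bit(&sc->flags, FOO_BUSY);
 *
 *	// elsewhere
 *	wait_on_bit(&sc->flags, FOO_BUSY, TASK_UNINTERRUPTIBLE);
 */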

struct task_struct;
bool linux_wake_up_state(struct task_struct *, unsigned int);

#define	wake_up_process(task)		linux_wake_up_state(task, TASK_NORMAL)
#define	wake_up_state(task, state)	linux_wake_up_state(task, state)

#endif /* _LINUX_WAIT_H_ */