/* wait.h revision 330861 */
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/wait.h 330861 2018-03-13 16:30:01Z hselasky $
 */
32328653Shselasky
33328653Shselasky#ifndef _LINUX_WAIT_H_
34219820Sjeff#define	_LINUX_WAIT_H_
35219820Sjeff
36300671Shselasky#include <linux/compiler.h>
37328653Shselasky#include <linux/list.h>
38219820Sjeff#include <linux/spinlock.h>
39219820Sjeff
40328653Shselasky#include <asm/atomic.h>
41328653Shselasky
42219820Sjeff#include <sys/param.h>
43219820Sjeff#include <sys/systm.h>
44219820Sjeff
/*
 * True when sleeping must be skipped: the scheduler has been stopped
 * (e.g. during panic) or the kernel debugger is active.
 */
#define	SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)

/* Assert (via WITNESS) that the current context is allowed to sleep. */
#define	might_sleep()							\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")
49219820Sjeff
struct wait_queue;
struct wait_queue_head;

/* Linux-compatible typedef aliases for the structures below. */
typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_head wait_queue_head_t;

/* Wake-up callback type invoked for an entry on a wait queue. */
typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);
57219820Sjeff
/*
 * Many API consumers directly reference these fields and those of
 * wait_queue_head, so the field names must stay Linux-compatible.
 */
struct wait_queue {
	unsigned int flags;	/* always 0 */
	void *private;		/* waiting thread; set from "current" or the
				 * task argument by the DEFINE_*/DECLARE_*
				 * macros below */
	wait_queue_func_t *func;	/* wake-up callback for this entry */
	struct list_head task_list;	/* linkage on the head's task_list */
};

struct wait_queue_head {
	spinlock_t lock;		/* protects task_list */
	struct list_head task_list;	/* list of wait_queue entries */
};
73328653Shselasky
/*
 * This function is referenced by at least one DRM driver, so it may not be
 * renamed and furthermore must be the default wait queue callback.
 */
extern wait_queue_func_t autoremove_wake_function;
extern wait_queue_func_t default_wake_function;

/*
 * Declare an on-stack wait queue entry for the current thread with the
 * given wake-up callback.
 */
#define	DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private = current,					\
		.func = function,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

/* Same, using the default (auto-removing) wake-up callback. */
#define	DEFINE_WAIT(name) \
	DEFINE_WAIT_FUNC(name, autoremove_wake_function)

/* Declare a wait queue entry for an arbitrary task; no callback is set. */
#define	DECLARE_WAITQUEUE(name, task)					\
	wait_queue_t name = {						\
		.private = task,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}
96328653Shselasky
/*
 * Statically declare and initialize a wait queue head; the backing mutex
 * is initialized at boot time through MTX_SYSINIT.
 */
#define	DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = {					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list),	\
	};								\
	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)

/* Initialize a dynamically allocated wait queue head at run time. */
#define	init_waitqueue_head(wqh) do {					\
	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),		\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);			\
	INIT_LIST_HEAD(&(wqh)->task_list);				\
} while (0)
108219820Sjeff
/*
 * Wake sleepers on a wait queue.  Arguments: queue head, task state mask
 * to match, number of threads to wake (0 wakes all), and whether the
 * caller already holds the wait queue head lock.
 */
void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);

#define	wake_up(wqh)							\
	linux_wake_up(wqh, TASK_NORMAL, 1, false)
#define	wake_up_all(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, false)
#define	wake_up_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 1, true)
#define	wake_up_all_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, true)
#define	wake_up_interruptible(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
#define	wake_up_interruptible_all(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)
123328653Shselasky
int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
    unsigned int, spinlock_t *);

/*
 * Core wait loop: sleep on "wqh" until "cond" is true, a timeout expires,
 * or (for interruptible states) a signal arrives.
 *
 * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
 * cond is true after timeout, remaining jiffies (> 0) if cond is true before
 * timeout.
 *
 * Note that a non-positive "timeout" argument is clamped to 1 tick, and the
 * computed remaining time is range-checked back into [1, __timeout] at the
 * end.  "cond" may be evaluated multiple times.
 */
#define	__wait_event_common(wqh, cond, timeout, state, lock) ({	\
	DEFINE_WAIT(__wq);					\
	const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout);	\
	int __start = ticks;					\
	int __ret = 0;						\
								\
	for (;;) {						\
		linux_prepare_to_wait(&(wqh), &__wq, state);	\
		if (cond)					\
			break;					\
		__ret = linux_wait_event_common(&(wqh), &__wq,	\
		    __timeout, state, lock);			\
		if (__ret != 0)					\
			break;					\
	}							\
	linux_finish_wait(&(wqh), &__wq);			\
	if (__timeout != MAX_SCHEDULE_TIMEOUT) {		\
		if (__ret == -EWOULDBLOCK)			\
			__ret = !!(cond);			\
		else if (__ret != -ERESTARTSYS) {		\
			__ret = __timeout + __start - ticks;	\
			/* range check return value */		\
			if (__ret < 1)				\
				__ret = 1;			\
			else if (__ret > __timeout)		\
				__ret = __timeout;		\
		}						\
	}							\
	__ret;							\
})
162219820Sjeff
/* Sleep uninterruptibly until "cond" becomes true. */
#define	wait_event(wqh, cond) do {					\
	(void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, NULL);				\
} while (0)

/*
 * Like wait_event() but gives up after "timeout" ticks; see
 * __wait_event_common() for the return value convention.
 */
#define	wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

/* Interruptible variants: a pending signal aborts with -ERESTARTSYS. */
#define	wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible_timeout(wqh, cond, timeout) ({		\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})
182328653Shselasky
/*
 * Wait queue is already locked.  The head lock is dropped for the
 * duration of the wait and re-acquired before returning.
 */
#define	wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * Hold the (locked) spinlock when testing the cond.
 */
#define	wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})
203297459Snp
204328653Shselaskystatic inline void
205328653Shselasky__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
206289564Shselasky{
207328653Shselasky	list_add(&wq->task_list, &wqh->task_list);
208289564Shselasky}
209219820Sjeff
210328653Shselaskystatic inline void
211328653Shselaskyadd_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
212328653Shselasky{
213289564Shselasky
214328653Shselasky	spin_lock(&wqh->lock);
215328653Shselasky	__add_wait_queue(wqh, wq);
216328653Shselasky	spin_unlock(&wqh->lock);
217328653Shselasky}
218328653Shselasky
219289564Shselaskystatic inline void
220328653Shselasky__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
221289564Shselasky{
222328653Shselasky	list_add_tail(&wq->task_list, &wqh->task_list);
223289564Shselasky}
224289564Shselasky
225289564Shselaskystatic inline void
226328653Shselasky__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
227289564Shselasky{
228328653Shselasky	list_del(&wq->task_list);
229289564Shselasky}
230289564Shselasky
231328653Shselaskystatic inline void
232328653Shselaskyremove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
233328653Shselasky{
234328653Shselasky
235328653Shselasky	spin_lock(&wqh->lock);
236328653Shselasky	__remove_wait_queue(wqh, wq);
237328653Shselasky	spin_unlock(&wqh->lock);
238328653Shselasky}
239328653Shselasky
bool linux_waitqueue_active(wait_queue_head_t *);

/* True if any entry is present on the wait queue. */
#define	waitqueue_active(wqh)		linux_waitqueue_active(wqh)

void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);

#define	prepare_to_wait(wqh, wq, state)	linux_prepare_to_wait(wqh, wq, state)
#define	finish_wait(wqh, wq)		linux_finish_wait(wqh, wq)

/* Bit-wait and atomic_t-wait primitives, implemented out of line. */
void linux_wake_up_bit(void *, int);
int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
void linux_wake_up_atomic_t(atomic_t *);
int linux_wait_on_atomic_t(atomic_t *, unsigned int);

#define	wake_up_bit(word, bit)		linux_wake_up_bit(word, bit)
/* wait_on_bit() is wait_on_bit_timeout() with an infinite timeout. */
#define	wait_on_bit(word, bit, state)					\
	linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
#define	wait_on_bit_timeout(word, bit, state, timeout)			\
	linux_wait_on_bit_timeout(word, bit, state, timeout)
#define	wake_up_atomic_t(a)		linux_wake_up_atomic_t(a)
/*
 * All existing callers have a cb that just schedule()s. To avoid adding
 * complexity, just emulate that internally. The prototype is different so that
 * callers must be manually modified; a cb that does something other than call
 * schedule() will require special treatment.
 */
#define	wait_on_atomic_t(a, state)	linux_wait_on_atomic_t(a, state)

struct task_struct;
bool linux_wake_up_state(struct task_struct *, unsigned int);

#define	wake_up_process(task)		linux_wake_up_state(task, TASK_NORMAL)
#define	wake_up_state(task, state)	linux_wake_up_state(task, state)
274328653Shselasky
275328653Shselasky#endif /* _LINUX_WAIT_H_ */
276