/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/wait.h 330847 2018-03-13 16:07:38Z hselasky $
 */

33328653Shselasky#ifndef _LINUX_WAIT_H_
34219820Sjeff#define	_LINUX_WAIT_H_
35219820Sjeff
36300671Shselasky#include <linux/compiler.h>
37328653Shselasky#include <linux/list.h>
38219820Sjeff#include <linux/spinlock.h>
39219820Sjeff
40328653Shselasky#include <asm/atomic.h>
41328653Shselasky
42219820Sjeff#include <sys/param.h>
43219820Sjeff#include <sys/systm.h>
44219820Sjeff
45328653Shselasky#define	SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)
46289564Shselasky
47328653Shselasky#define	might_sleep()							\
48328653Shselasky	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")
49219820Sjeff
struct wait_queue;
struct wait_queue_head;

/* Linux-compatible typedef aliases for the wait-queue structures below. */
typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_head wait_queue_head_t;

/* Wake-up callback signature installed in each wait_queue entry. */
typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);
57219820Sjeff
58328653Shselasky/*
59328653Shselasky * Many API consumers directly reference these fields and those of
60328653Shselasky * wait_queue_head.
61328653Shselasky */
62328653Shselaskystruct wait_queue {
63328653Shselasky	unsigned int flags;	/* always 0 */
64328653Shselasky	void *private;
65328653Shselasky	wait_queue_func_t *func;
66328653Shselasky	struct list_head task_list;
67328653Shselasky};
68219820Sjeff
69328653Shselaskystruct wait_queue_head {
70328653Shselasky	spinlock_t lock;
71328653Shselasky	struct list_head task_list;
72328653Shselasky};
73328653Shselasky
74328653Shselasky/*
75328653Shselasky * This function is referenced by at least one DRM driver, so it may not be
76328653Shselasky * renamed and furthermore must be the default wait queue callback.
77328653Shselasky */
78328653Shselaskyextern wait_queue_func_t autoremove_wake_function;
79328653Shselasky
80328653Shselasky#define	DEFINE_WAIT(name)						\
81328653Shselasky	wait_queue_t name = {						\
82328653Shselasky		.private = current,					\
83328653Shselasky		.func = autoremove_wake_function,			\
84328653Shselasky		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
85328653Shselasky	}
86328653Shselasky
/* Declare a wait entry for an explicit task; no wake callback is set. */
#define	DECLARE_WAITQUEUE(name, task)					\
	wait_queue_t name = {						\
		.private = task,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}
92328653Shselasky
/*
 * Statically declare a wait-queue head; its spin lock is initialized at
 * boot via MTX_SYSINIT.
 */
#define	DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = {					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list),	\
	};								\
	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)
98328653Shselasky
/* Run-time initialization of a wait-queue head (lock + empty list). */
#define	init_waitqueue_head(wqh) do {					\
	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),		\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);			\
	INIT_LIST_HEAD(&(wqh)->task_list);				\
} while (0)
104219820Sjeff
105328653Shselaskyvoid linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);
106328653Shselasky
107328653Shselasky#define	wake_up(wqh)							\
108328653Shselasky	linux_wake_up(wqh, TASK_NORMAL, 1, false)
109328653Shselasky#define	wake_up_all(wqh)						\
110328653Shselasky	linux_wake_up(wqh, TASK_NORMAL, 0, false)
111328653Shselasky#define	wake_up_locked(wqh)						\
112328653Shselasky	linux_wake_up(wqh, TASK_NORMAL, 1, true)
113328653Shselasky#define	wake_up_all_locked(wqh)						\
114328653Shselasky	linux_wake_up(wqh, TASK_NORMAL, 0, true)
115328653Shselasky#define	wake_up_interruptible(wqh)					\
116328653Shselasky	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
117328653Shselasky#define	wake_up_interruptible_all(wqh)					\
118328653Shselasky	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)
119328653Shselasky
120328653Shselaskyint linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
121328653Shselasky    unsigned int, spinlock_t *);
122328653Shselasky
123328653Shselasky/*
124328653Shselasky * Returns -ERESTARTSYS for a signal, 0 if cond is false after timeout, 1 if
125328653Shselasky * cond is true after timeout, remaining jiffies (> 0) if cond is true before
126328653Shselasky * timeout.
127328653Shselasky */
128328653Shselasky#define	__wait_event_common(wqh, cond, timeout, state, lock) ({	\
129328653Shselasky	DEFINE_WAIT(__wq);					\
130328653Shselasky	const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout);	\
131328653Shselasky	int __start = ticks;					\
132328653Shselasky	int __ret = 0;						\
133328653Shselasky								\
134328653Shselasky	for (;;) {						\
135328653Shselasky		linux_prepare_to_wait(&(wqh), &__wq, state);	\
136328653Shselasky		if (cond)					\
137328653Shselasky			break;					\
138328653Shselasky		__ret = linux_wait_event_common(&(wqh), &__wq,	\
139328653Shselasky		    __timeout, state, lock);			\
140328653Shselasky		if (__ret != 0)					\
141328653Shselasky			break;					\
142328653Shselasky	}							\
143328653Shselasky	linux_finish_wait(&(wqh), &__wq);			\
144328653Shselasky	if (__timeout != MAX_SCHEDULE_TIMEOUT) {		\
145328653Shselasky		if (__ret == -EWOULDBLOCK)			\
146328653Shselasky			__ret = !!(cond);			\
147328653Shselasky		else if (__ret != -ERESTARTSYS) {		\
148328653Shselasky			__ret = __timeout + __start - ticks;	\
149328653Shselasky			/* range check return value */		\
150328653Shselasky			if (__ret < 1)				\
151328653Shselasky				__ret = 1;			\
152328653Shselasky			else if (__ret > __timeout)		\
153328653Shselasky				__ret = __timeout;		\
154328653Shselasky		}						\
155328653Shselasky	}							\
156328653Shselasky	__ret;							\
157219820Sjeff})
158219820Sjeff
/* Uninterruptible wait; result discarded. */
#define	wait_event(wqh, cond) do {					\
	(void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, NULL);				\
} while (0)

/* Uninterruptible wait with timeout; see __wait_event_common() returns. */
#define	wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

/* Signal-interruptible wait; may return -ERESTARTSYS. */
#define	wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

/* Signal-interruptible wait with timeout. */
#define	wait_event_interruptible_timeout(wqh, cond, timeout) ({		\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})
178328653Shselasky
/*
 * Wait queue is already locked.
 */
#define	wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * Hold the (locked) spinlock when testing the cond.
 */
#define	wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})
199297459Snp
200328653Shselaskystatic inline void
201328653Shselasky__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
202289564Shselasky{
203328653Shselasky	list_add(&wq->task_list, &wqh->task_list);
204289564Shselasky}
205219820Sjeff
206328653Shselaskystatic inline void
207328653Shselaskyadd_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
208328653Shselasky{
209289564Shselasky
210328653Shselasky	spin_lock(&wqh->lock);
211328653Shselasky	__add_wait_queue(wqh, wq);
212328653Shselasky	spin_unlock(&wqh->lock);
213328653Shselasky}
214328653Shselasky
215289564Shselaskystatic inline void
216328653Shselasky__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
217289564Shselasky{
218328653Shselasky	list_add_tail(&wq->task_list, &wqh->task_list);
219289564Shselasky}
220289564Shselasky
221289564Shselaskystatic inline void
222328653Shselasky__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
223289564Shselasky{
224328653Shselasky	list_del(&wq->task_list);
225289564Shselasky}
226289564Shselasky
227328653Shselaskystatic inline void
228328653Shselaskyremove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
229328653Shselasky{
230328653Shselasky
231328653Shselasky	spin_lock(&wqh->lock);
232328653Shselasky	__remove_wait_queue(wqh, wq);
233328653Shselasky	spin_unlock(&wqh->lock);
234328653Shselasky}
235328653Shselasky
236328653Shselaskybool linux_waitqueue_active(wait_queue_head_t *);
237328653Shselasky
238328653Shselasky#define	waitqueue_active(wqh)		linux_waitqueue_active(wqh)
239328653Shselasky
240328653Shselaskyvoid linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
241328653Shselaskyvoid linux_finish_wait(wait_queue_head_t *, wait_queue_t *);
242328653Shselasky
243328653Shselasky#define	prepare_to_wait(wqh, wq, state)	linux_prepare_to_wait(wqh, wq, state)
244328653Shselasky#define	finish_wait(wqh, wq)		linux_finish_wait(wqh, wq)
245328653Shselasky
246328653Shselaskyvoid linux_wake_up_bit(void *, int);
247328653Shselaskyint linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
248328653Shselaskyvoid linux_wake_up_atomic_t(atomic_t *);
249328653Shselaskyint linux_wait_on_atomic_t(atomic_t *, unsigned int);
250328653Shselasky
251328653Shselasky#define	wake_up_bit(word, bit)		linux_wake_up_bit(word, bit)
252330847Shselasky#define	wait_on_bit(word, bit, state)					\
253330847Shselasky	linux_wait_on_bit_timeout(word, bit, state, MAX_SCHEDULE_TIMEOUT)
254328653Shselasky#define	wait_on_bit_timeout(word, bit, state, timeout)			\
255328653Shselasky	linux_wait_on_bit_timeout(word, bit, state, timeout)
256328653Shselasky#define	wake_up_atomic_t(a)		linux_wake_up_atomic_t(a)
257328653Shselasky/*
258328653Shselasky * All existing callers have a cb that just schedule()s. To avoid adding
259328653Shselasky * complexity, just emulate that internally. The prototype is different so that
260328653Shselasky * callers must be manually modified; a cb that does something other than call
261328653Shselasky * schedule() will require special treatment.
262328653Shselasky */
263328653Shselasky#define	wait_on_atomic_t(a, state)	linux_wait_on_atomic_t(a, state)
264328653Shselasky
265328653Shselaskystruct task_struct;
266328653Shselaskybool linux_wake_up_state(struct task_struct *, unsigned int);
267328653Shselasky
268328653Shselasky#define	wake_up_process(task)		linux_wake_up_state(task, TASK_NORMAL)
269328653Shselasky#define	wake_up_state(task, state)	linux_wake_up_state(task, state)
270328653Shselasky
271328653Shselasky#endif /* _LINUX_WAIT_H_ */
272