/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/wait.h 328653 2018-02-01 13:01:44Z hselasky $
 */

#ifndef _LINUX_WAIT_H_
#define	_LINUX_WAIT_H_

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/systm.h>

#define	SKIP_SLEEP() (SCHEDULER_STOPPED() || kdb_active)

#define	might_sleep()							\
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "might_sleep()")

struct wait_queue;
struct wait_queue_head;

typedef struct wait_queue wait_queue_t;
typedef struct wait_queue_head wait_queue_head_t;

typedef int wait_queue_func_t(wait_queue_t *, unsigned int, int, void *);

/*
 * Many API consumers directly reference these fields and those of
 * wait_queue_head.
 */
struct wait_queue {
	unsigned int flags;	/* always 0 */
	void *private;
	wait_queue_func_t *func;
	struct list_head task_list;
};

struct wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};

/*
 * This function is referenced by at least one DRM driver, so it may not be
 * renamed and furthermore must be the default wait queue callback.
 */
extern wait_queue_func_t autoremove_wake_function;

#define	DEFINE_WAIT(name)						\
	wait_queue_t name = {						\
		.private = current,					\
		.func = autoremove_wake_function,			\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

#define	DECLARE_WAITQUEUE(name, task)					\
	wait_queue_t name = {						\
		.private = task,					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list)	\
	}

#define	DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = {					\
		.task_list = LINUX_LIST_HEAD_INIT(name.task_list),	\
	};								\
	MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)

#define	init_waitqueue_head(wqh) do {					\
	mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"),		\
	    NULL, MTX_DEF | MTX_NEW | MTX_NOWITNESS);			\
	INIT_LIST_HEAD(&(wqh)->task_list);				\
} while (0)

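/*
 * Example usage (illustrative sketch only, not part of the KPI): a consumer
 * either declares a wait queue head statically or embeds one in its own
 * structure and initializes it before first use.  The "foo_softc" structure
 * and "foo_attach" function below are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_global_wq);
 *
 *	struct foo_softc {
 *		wait_queue_head_t wq;
 *		bool ready;
 *	};
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *		init_waitqueue_head(&sc->wq);
 *		sc->ready = false;
 *	}
 */
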
void linux_wake_up(wait_queue_head_t *, unsigned int, int, bool);

#define	wake_up(wqh)							\
	linux_wake_up(wqh, TASK_NORMAL, 1, false)
#define	wake_up_all(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, false)
#define	wake_up_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 1, true)
#define	wake_up_all_locked(wqh)						\
	linux_wake_up(wqh, TASK_NORMAL, 0, true)
#define	wake_up_interruptible(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 1, false)
#define	wake_up_interruptible_all(wqh)					\
	linux_wake_up(wqh, TASK_INTERRUPTIBLE, 0, false)

int linux_wait_event_common(wait_queue_head_t *, wait_queue_t *, int,
    unsigned int, spinlock_t *);

/*
 * Returns -ERESTARTSYS if the sleep was interrupted by a signal, 0 if cond
 * was still false when the timeout expired, 1 if cond was true by the time
 * the timeout expired, and the number of remaining jiffies (> 0) if cond
 * became true before the timeout.
 */
#define	__wait_event_common(wqh, cond, timeout, state, lock) ({	\
	DEFINE_WAIT(__wq);					\
	const int __timeout = ((int)(timeout)) < 1 ? 1 : (timeout);	\
	int __start = ticks;					\
	int __ret = 0;						\
								\
	for (;;) {						\
		linux_prepare_to_wait(&(wqh), &__wq, state);	\
		if (cond)					\
			break;					\
		__ret = linux_wait_event_common(&(wqh), &__wq,	\
		    __timeout, state, lock);			\
		if (__ret != 0)					\
			break;					\
	}							\
	linux_finish_wait(&(wqh), &__wq);			\
	if (__timeout != MAX_SCHEDULE_TIMEOUT) {		\
		if (__ret == -EWOULDBLOCK)			\
			__ret = !!(cond);			\
		else if (__ret != -ERESTARTSYS) {		\
			__ret = __timeout + __start - ticks;	\
			/* range check return value */		\
			if (__ret < 1)				\
				__ret = 1;			\
			else if (__ret > __timeout)		\
				__ret = __timeout;		\
		}						\
	}							\
	__ret;							\
})

#define	wait_event(wqh, cond) do {					\
	(void) __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_UNINTERRUPTIBLE, NULL);				\
} while (0)

#define	wait_event_timeout(wqh, cond, timeout) ({			\
	__wait_event_common(wqh, cond, timeout, TASK_UNINTERRUPTIBLE,	\
	    NULL);							\
})

#define	wait_event_interruptible(wqh, cond) ({				\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, NULL);					\
})

#define	wait_event_interruptible_timeout(wqh, cond, timeout) ({		\
	__wait_event_common(wqh, cond, timeout, TASK_INTERRUPTIBLE,	\
	    NULL);							\
})

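/*
 * Example usage (illustrative sketch only): a consumer sleeps until a flag
 * is set or two seconds have elapsed, while a producer sets the flag and
 * wakes the queue.  "foo_softc", "foo_read" and "foo_intr" are hypothetical;
 * msecs_to_jiffies() is assumed to come from <linux/jiffies.h>.
 *
 *	static int
 *	foo_read(struct foo_softc *sc)
 *	{
 *		int ret;
 *
 *		ret = wait_event_interruptible_timeout(sc->wq, sc->ready,
 *		    msecs_to_jiffies(2000));
 *		if (ret == -ERESTARTSYS)
 *			return (ret);		// interrupted by a signal
 *		if (ret == 0)
 *			return (-ETIMEDOUT);	// timed out, still not ready
 *		return (0);			// ready; ret is jiffies left
 *	}
 *
 *	static void
 *	foo_intr(struct foo_softc *sc)
 *	{
 *		sc->ready = true;
 *		wake_up_interruptible(&sc->wq);
 *	}
 */
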
/*
 * The wait queue lock is held by the caller on entry; it is dropped while
 * waiting and reacquired before returning.
 */
#define	wait_event_interruptible_locked(wqh, cond) ({			\
	int __ret;							\
									\
	spin_unlock(&(wqh).lock);					\
	__ret = __wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,	\
	    TASK_INTERRUPTIBLE, NULL);					\
	spin_lock(&(wqh).lock);						\
	__ret;								\
})

/*
 * Hold the (locked) spinlock when testing the cond.
 */
#define	wait_event_interruptible_lock_irq(wqh, cond, lock) ({		\
	__wait_event_common(wqh, cond, MAX_SCHEDULE_TIMEOUT,		\
	    TASK_INTERRUPTIBLE, &(lock));				\
})

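/*
 * Example usage of the lock-aware variant (illustrative sketch only): the
 * caller acquires the spinlock before invoking the macro; the lock is held
 * whenever cond is evaluated and released while sleeping.  "sc" and its
 * fields are hypothetical.
 *
 *	spin_lock_irq(&sc->lock);
 *	ret = wait_event_interruptible_lock_irq(sc->wq, sc->ready, sc->lock);
 *	spin_unlock_irq(&sc->lock);
 */
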
static inline void
__add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add(&wq->task_list, &wqh->task_list);
}

static inline void
add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__add_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

static inline void
__add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_add_tail(&wq->task_list, &wqh->task_list);
}

static inline void
__remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{
	list_del(&wq->task_list);
}

static inline void
remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq)
{

	spin_lock(&wqh->lock);
	__remove_wait_queue(wqh, wq);
	spin_unlock(&wqh->lock);
}

bool linux_waitqueue_active(wait_queue_head_t *);

#define	waitqueue_active(wqh)		linux_waitqueue_active(wqh)

void linux_prepare_to_wait(wait_queue_head_t *, wait_queue_t *, int);
void linux_finish_wait(wait_queue_head_t *, wait_queue_t *);

#define	prepare_to_wait(wqh, wq, state)	linux_prepare_to_wait(wqh, wq, state)
#define	finish_wait(wqh, wq)		linux_finish_wait(wqh, wq)

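/*
 * Example of an open-coded wait loop (illustrative sketch only), for cases
 * where the wait_event*() macros are not flexible enough.  "foo_softc" and
 * "foo_wait_ready" are hypothetical; schedule() is assumed to come from
 * <linux/sched.h>.
 *
 *	static void
 *	foo_wait_ready(struct foo_softc *sc)
 *	{
 *		DEFINE_WAIT(wait);
 *
 *		for (;;) {
 *			prepare_to_wait(&sc->wq, &wait, TASK_UNINTERRUPTIBLE);
 *			if (sc->ready)
 *				break;
 *			schedule();
 *		}
 *		finish_wait(&sc->wq, &wait);
 *	}
 */
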
void linux_wake_up_bit(void *, int);
int linux_wait_on_bit_timeout(unsigned long *, int, unsigned int, int);
void linux_wake_up_atomic_t(atomic_t *);
int linux_wait_on_atomic_t(atomic_t *, unsigned int);

#define	wake_up_bit(word, bit)		linux_wake_up_bit(word, bit)
#define	wait_on_bit_timeout(word, bit, state, timeout)			\
	linux_wait_on_bit_timeout(word, bit, state, timeout)
#define	wake_up_atomic_t(a)		linux_wake_up_atomic_t(a)
/*
 * All existing callers pass an action callback that simply calls schedule().
 * To avoid adding complexity, that behaviour is emulated internally.  The
 * prototype deliberately differs from Linux's so that callers must be
 * modified by hand; a callback that does anything other than call schedule()
 * will require special treatment.
 */
#define	wait_on_atomic_t(a, state)	linux_wait_on_atomic_t(a, state)

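/*
 * Example usage (illustrative sketch only): waiting up to one second for a
 * flag bit to be cleared, and signalling the waiter from another context.
 * "FOO_BUSY" and "sc->flags" are hypothetical; clear_bit() and
 * msecs_to_jiffies() are assumed to come from other linuxkpi headers.
 *
 *	// waiter
 *	if (wait_on_bit_timeout(&sc->flags, FOO_BUSY, TASK_UNINTERRUPTIBLE,
 *	    msecs_to_jiffies(1000)) != 0)
 *		return (-ETIMEDOUT);
 *
 *	// other context
 *	clear_bit(FOO_BUSY, &sc->flags);
 *	wake_up_bit(&sc->flags, FOO_BUSY);
 */
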
struct task_struct;
bool linux_wake_up_state(struct task_struct *, unsigned int);

#define	wake_up_process(task)		linux_wake_up_state(task, TASK_NORMAL)
#define	wake_up_state(task, state)	linux_wake_up_state(task, state)

#endif /* _LINUX_WAIT_H_ */