/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef	KERNEL_PRIVATE

#ifndef _KERN_WAIT_QUEUE_H_
#define _KERN_WAIT_QUEUE_H_

#include <mach/mach_types.h>
#include <mach/sync_policy.h>
#include <mach/kern_return.h>		/* for kern_return_t */

#include <kern/kern_types.h>		/* for wait_queue_t */
#include <kern/queue.h>

#include <sys/cdefs.h>

#ifdef	MACH_KERNEL_PRIVATE

#include <kern/lock.h>
#include <mach/branch_predicates.h>

#include <machine/cpu_number.h>
#include <machine/machine_routines.h>	/* machine_timeout_suspended() */
/*
 *	wait_queue_t
 *	This is the definition of the common event wait queue
 *	that the scheduler APIs understand.  It is used
 *	internally by the generalized event waiting mechanism
 *	(assert_wait), and also for items that maintain their
 *	own wait queues (such as ports and semaphores).
 *
 *	It is not published to other kernel components.  They
 *	can create wait queues by calling wait_queue_alloc.
 *
 *	NOTE:  Hardware locks are used to protect event wait
 *	queues since interrupt code is free to post events to
 *	them.
 */
typedef struct wait_queue {
	unsigned int			/* flags */
	/* boolean_t */	wq_type:16,	/* only public field */
			wq_fifo:1,	/* fifo wakeup policy? */
			wq_prepost:1,	/* waitq supports prepost? set only */
			:0;		/* force to long boundary */
	hw_lock_data_t	wq_interlock;	/* interlock */
	queue_head_t	wq_queue;	/* queue of elements */
} WaitQueue;
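
/*
 *	Example (illustrative sketch; exact call sites vary): most kernel
 *	components allocate a wait queue through the exported interface
 *	declared later in this header, while MACH_KERNEL_PRIVATE code may
 *	embed a struct wait_queue and initialize it in place.
 *	SYNC_POLICY_FIFO comes from <mach/sync_policy.h>.
 *
 *		wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *		...
 *		wait_queue_free(wq);
 *
 *		struct wait_queue embedded;
 *		wait_queue_init(&embedded, SYNC_POLICY_FIFO);
 */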

/*
 *	wait_queue_set_t
 *	This is the common definition for a set wait queue.
 *	These can be linked as members/elements of multiple regular
 *	wait queues.  They have an additional set of linkages to
 *	identify the linkage structures that point to them.
 */
typedef struct wait_queue_set {
	WaitQueue	wqs_wait_queue;	/* our wait queue */
	queue_head_t	wqs_setlinks;	/* links from set perspective */
	queue_head_t	wqs_preposts;	/* preposted links */
} WaitQueueSet;

#define wqs_type	wqs_wait_queue.wq_type
#define wqs_fifo	wqs_wait_queue.wq_fifo
#define wqs_prepost	wqs_wait_queue.wq_prepost
#define wqs_queue	wqs_wait_queue.wq_queue

/*
 *	wait_queue_element_t
 *	This structure describes the elements on an event wait
 *	queue.  Its fields are the common first fields of both a thread
 *	shuttle and a wait_queue_link_t.  In that way, a wait queue can
 *	consist of both thread shuttle elements and links off to
 *	other (set) wait queues.
 *
 *	WARNING: These fields correspond to fields in the thread
 *	shuttle (run queue links and run queue pointer). Any change in
 *	the layout here will have to be matched with a change there.
 */
typedef struct wait_queue_element {
	queue_chain_t	wqe_links;	/* link of elements on this queue */
	void *		wqe_type;	/* identifies link vs. thread */
	wait_queue_t	wqe_queue;	/* queue this element is on */
} WaitQueueElement;

typedef WaitQueueElement *wait_queue_element_t;

/*
 *	wait_queue_link_t
 *	Specialized wait queue element type for linking set
 *	event wait queues onto a wait queue.  In this way, an event
 *	can be constructed so that any thread waiting on any number
 *	of associated wait queues can handle the event, while letting
 *	the thread only be linked on the single wait queue it blocked on.
 *
 *	One use: ports in multiple portsets.  Each thread is queued up
 *	on the portset that it specifically blocked on during a receive
 *	operation.  Each port's event queue links in all the portset
 *	event queues of which it is a member.  An IPC event post associated
 *	with that port may wake up any thread from any of those portsets,
 *	or one that was waiting locally on the port itself.
 */
typedef struct _wait_queue_link {
	WaitQueueElement	wql_element;	/* element on master */
	queue_chain_t		wql_setlinks;	/* element on set */
	queue_chain_t		wql_preposts;	/* element on set prepost list */
	wait_queue_set_t	wql_setqueue;	/* set queue */
} WaitQueueLink;

#define wql_links wql_element.wqe_links
#define wql_type  wql_element.wqe_type
#define wql_queue wql_element.wqe_queue
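
/*
 *	Example (illustrative sketch of the portset pattern described
 *	above; the actual IPC call sites differ): a member wait queue is
 *	joined to and removed from a set wait queue with the exported
 *	linking interfaces declared later in this header.
 *
 *		wait_queue_set_t wq_set = wait_queue_set_alloc(SYNC_POLICY_FIFO);
 *		wait_queue_t wq = wait_queue_alloc(SYNC_POLICY_FIFO);
 *
 *		(void) wait_queue_link(wq, wq_set);	(allocates the link)
 *		...
 *		(void) wait_queue_unlink(wq, wq_set);	(frees the link)
 *
 *		wait_queue_free(wq);
 *		wait_queue_set_free(wq_set);
 */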

#define _WAIT_QUEUE_inited		0xf1d0
#define _WAIT_QUEUE_SET_inited		0xf1d1

#define wait_queue_is_queue(wq)	\
	((wq)->wq_type == _WAIT_QUEUE_inited)

#define wait_queue_is_set(wqs)	\
	((wqs)->wqs_type == _WAIT_QUEUE_SET_inited)

#define wait_queue_is_valid(wq)	\
	(((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)

#define wait_queue_empty(wq)	(queue_empty(&(wq)->wq_queue))

#define wait_queue_held(wq)	(hw_lock_held(&(wq)->wq_interlock))
#define wait_queue_lock_try(wq)	(hw_lock_try(&(wq)->wq_interlock))

/* For x86, the hardware timeout is in TSC units. */
#if defined(i386) || defined(x86_64)
#define	hwLockTimeOut LockTimeOutTSC
#else
#define	hwLockTimeOut LockTimeOut
#endif
/*
 * Use double the standard lock timeout, because wait queue operations
 * tend to iterate over a number of threads, locking each one in turn.
 * If there is a problem with a thread lock, it normally times out at
 * the wait queue level first, hiding the real problem.
 */

static inline void wait_queue_lock(wait_queue_t wq) {
	if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
		boolean_t wql_acquired = FALSE;

		while (machine_timeout_suspended()) {
#if	defined(__i386__) || defined(__x86_64__)
			/*
			 * i386/x86_64 return with preemption disabled on a
			 * timeout for diagnostic purposes.
			 */
			mp_enable_preemption();
#endif
			if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
				break;
		}
		if (wql_acquired == FALSE)
			panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
	}
}

static inline void wait_queue_unlock(wait_queue_t wq) {
	assert(wait_queue_held(wq));
	hw_lock_unlock(&(wq)->wq_interlock);
}

#define wqs_lock(wqs)		wait_queue_lock(&(wqs)->wqs_wait_queue)
#define wqs_unlock(wqs)		wait_queue_unlock(&(wqs)->wqs_wait_queue)
#define wqs_lock_try(wqs)	wait_queue_lock_try(&(wqs)->wqs_wait_queue)
#define wqs_is_preposted(wqs)	((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))

#define wql_is_preposted(wql)	((wql)->wql_preposts.next != NULL)
#define wql_clear_prepost(wql)	((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)

#define wait_queue_assert_possible(thread) \
			((thread)->wait_queue == WAIT_QUEUE_NULL)

/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
__private_extern__ void wait_queue_bootstrap(void);

/******** Decomposed interfaces (to build higher-level constructs) ***********/

/* assert intent to wait on a locked wait queue */
__private_extern__ wait_result_t wait_queue_assert_wait64_locked(
			wait_queue_t wait_queue,
			event64_t wait_event,
			wait_interrupt_t interruptible,
			uint64_t deadline,
			thread_t thread);

/* pull a thread from its wait queue */
__private_extern__ void wait_queue_pull_thread_locked(
			wait_queue_t wait_queue,
			thread_t thread,
			boolean_t unlock);

/* wakeup all threads waiting for a particular event on the locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_all_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* wakeup one thread waiting for a particular event on the locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_one_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* return identity of a thread awakened for a particular <wait_queue,event> */
__private_extern__ thread_t wait_queue_wakeup64_identity_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result,
			boolean_t unlock);

/* wakeup a thread iff it is still waiting for a particular event on the locked queue */
__private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
			wait_queue_t wait_queue,
			event64_t wake_event,
			thread_t thread,
			wait_result_t result,
			boolean_t unlock);
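
/*
 *	Example (illustrative sketch of how the exported, non-locked
 *	wrappers are built from the decomposed interfaces; the real
 *	callers also validate the queue and interpret the wait result;
 *	thread is typically current_thread()):
 *
 *		s = splsched();
 *		wait_queue_lock(wq);
 *		thread_lock(thread);
 *		wres = wait_queue_assert_wait64_locked(wq, event, THREAD_UNINT,
 *							deadline, thread);
 *		thread_unlock(thread);
 *		wait_queue_unlock(wq);
 *		splx(s);
 *		if (wres == THREAD_WAITING)
 *			wres = thread_block(THREAD_CONTINUE_NULL);
 */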

__private_extern__ uint32_t num_wait_queues;
__private_extern__ struct wait_queue *wait_queues;

/* The Jenkins "one at a time" hash.
 * TBD: There may be some value to unrolling here,
 * depending on the architecture.
 */
static inline uint32_t wq_hash(char *key)
{
	uint32_t hash = 0;
	size_t i, length = sizeof(char *);

	for (i = 0; i < length; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	/* num_wait_queues is assumed to be a power of two */
	hash &= (num_wait_queues - 1);
	return hash;
}

#define	wait_hash(event) wq_hash((char *)&event)
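
/*
 *	Example (illustrative sketch): the global bucket for a pointer
 *	event is found by hashing the bytes of the event value itself.
 *
 *		wait_queue_t wq = &wait_queues[wait_hash(event)];
 */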

#endif	/* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/******** Semi-Public interfaces (not a part of a higher construct) ************/

extern unsigned int wait_queue_set_size(void);
extern unsigned int wait_queue_link_size(void);

extern kern_return_t wait_queue_init(
			wait_queue_t wait_queue,
			int policy);

extern wait_queue_set_t wait_queue_set_alloc(
			int policy);

extern kern_return_t wait_queue_set_init(
			wait_queue_set_t set_queue,
			int policy);

extern kern_return_t wait_queue_set_free(
			wait_queue_set_t set_queue);

extern wait_queue_link_t wait_queue_link_alloc(
			int policy);

extern kern_return_t wait_queue_link_free(
			wait_queue_link_t link_element);

extern kern_return_t wait_queue_link(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

extern kern_return_t wait_queue_link_noalloc(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue,
			wait_queue_link_t link);

extern boolean_t wait_queue_member(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

extern kern_return_t wait_queue_unlink(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue);

extern kern_return_t wait_queue_unlink_all(
			wait_queue_t wait_queue);

extern kern_return_t wait_queue_set_unlink_all(
			wait_queue_set_t set_queue);

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t wait_queue_set_unlink_one(
			wait_queue_set_t set_queue,
			wait_queue_link_t link);

extern kern_return_t wait_queue_unlink_nofree(
			wait_queue_t wait_queue,
			wait_queue_set_t set_queue,
			wait_queue_link_t *wqlp);

extern kern_return_t wait_queue_unlink_all_nofree(
			wait_queue_t wait_queue,
			queue_t links);

extern kern_return_t wait_queue_set_unlink_all_nofree(
			wait_queue_set_t set_queue,
			queue_t links);

extern wait_queue_link_t wait_queue_link_allocate(void);

#endif /* XNU_KERNEL_PRIVATE */

/* legacy API */
kern_return_t wait_queue_sub_init(
			wait_queue_set_t set_queue,
			int policy);

kern_return_t wait_queue_sub_clearrefs(
			wait_queue_set_t wq_set);

extern kern_return_t wait_subqueue_unlink_all(
			wait_queue_set_t set_queue);

extern wait_queue_t wait_queue_alloc(
			int policy);

extern kern_return_t wait_queue_free(
			wait_queue_t wait_queue);

/* assert intent to wait on <wait_queue,event64> pair */
extern wait_result_t wait_queue_assert_wait64(
			wait_queue_t wait_queue,
			event64_t wait_event,
			wait_interrupt_t interruptible,
			uint64_t deadline);

/* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_one(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result);

/* wakeup all the threads waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_all(
			wait_queue_t wait_queue,
			event64_t wake_event,
			wait_result_t result);

/* wakeup a specified thread iff it is waiting on <wait_queue,event64> pair */
extern kern_return_t wait_queue_wakeup64_thread(
			wait_queue_t wait_queue,
			event64_t wake_event,
			thread_t thread,
			wait_result_t result);

/*
 * Compatibility wait queue APIs based on pointer events instead of
 * 64-bit integer events.
 */

/* assert intent to wait on <wait_queue,event> pair */
extern wait_result_t wait_queue_assert_wait(
			wait_queue_t wait_queue,
			event_t wait_event,
			wait_interrupt_t interruptible,
			uint64_t deadline);

/* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_one(
			wait_queue_t wait_queue,
			event_t wake_event,
			wait_result_t result,
			int priority);

/* wakeup all the threads waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_all(
			wait_queue_t wait_queue,
			event_t wake_event,
			wait_result_t result);

/* wakeup a specified thread iff it is waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_thread(
			wait_queue_t wait_queue,
			event_t wake_event,
			thread_t thread,
			wait_result_t result);
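
/*
 *	Example (illustrative sketch of the pointer-event wait/wakeup
 *	round trip; THREAD_UNINT, THREAD_WAITING, THREAD_AWAKENED and
 *	thread_block() come from the kernel's scheduling headers):
 *
 *	    Waiter:
 *		if (wait_queue_assert_wait(wq, event, THREAD_UNINT, 0) ==
 *							THREAD_WAITING)
 *			(void) thread_block(THREAD_CONTINUE_NULL);
 *
 *	    Waker:
 *		(void) wait_queue_wakeup_all(wq, event, THREAD_AWAKENED);
 */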

__END_DECLS

#endif	/* _KERN_WAIT_QUEUE_H_ */

#endif	/* KERNEL_PRIVATE */