1/*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 *	File:	wait_queue.c (adapted from sched_prim.c)
60 *	Author:	Avadis Tevanian, Jr.
61 *	Date:	1986
62 *
63 *	Primitives for manipulating wait queues: either global
64 *	ones from sched_prim.c, or private ones associated with
 *	particular structures (ports, semaphores, etc.).
66 */
67
68#include <kern/kern_types.h>
69#include <kern/simple_lock.h>
70#include <kern/zalloc.h>
71#include <kern/queue.h>
72#include <kern/spl.h>
73#include <mach/sync_policy.h>
74#include <kern/mach_param.h>
75#include <kern/sched_prim.h>
76
77#include <kern/wait_queue.h>
78#include <vm/vm_kern.h>
79
80/* forward declarations */
81static boolean_t wait_queue_member_locked(
82			wait_queue_t		wq,
83			wait_queue_set_t	wq_set);
84
85static void wait_queues_init(void);
86
87#define WAIT_QUEUE_MAX thread_max
#define WAIT_QUEUE_SET_MAX (task_max * 3)
#define WAIT_QUEUE_LINK_MAX (PORT_MAX / 2 + (WAIT_QUEUE_MAX * WAIT_QUEUE_SET_MAX) / 64)
90
91static zone_t _wait_queue_link_zone;
92static zone_t _wait_queue_set_zone;
93static zone_t _wait_queue_zone;
94
95/* see rdar://6737748&5561610; we need an unshadowed
96 * definition of a WaitQueueLink for debugging,
97 * but it needs to be used somewhere to wind up in
98 * the dSYM file. */
99volatile WaitQueueLink *unused_except_for_debugging;
100
101
102/*
103 *	Waiting protocols and implementation:
104 *
105 *	Each thread may be waiting for exactly one event; this event
106 *	is set using assert_wait().  That thread may be awakened either
107 *	by performing a thread_wakeup_prim() on its event,
108 *	or by directly waking that thread up with clear_wait().
109 *
110 *	The implementation of wait events uses a hash table.  Each
 *	bucket is a queue of threads having the same hash function
112 *	value; the chain for the queue (linked list) is the run queue
113 *	field.  [It is not possible to be waiting and runnable at the
114 *	same time.]
115 *
116 *	Locks on both the thread and on the hash buckets govern the
117 *	wait event field and the queue chain field.  Because wakeup
118 *	operations only have the event as an argument, the event hash
119 *	bucket must be locked before any thread.
120 *
121 *	Scheduling operations may also occur at interrupt level; therefore,
122 *	interrupts below splsched() must be prevented when holding
123 *	thread or hash bucket locks.
124 *
125 *	The wait event hash table declarations are as follows:
126 */
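
/*
 * Illustrative sketch (not part of this file's build) of the protocol
 * described above, using the event-based interfaces from sched_prim.h:
 * the waiter asserts a wait on the address of an object and blocks; the
 * waker posts a wakeup on that same address.  The object type and its
 * "ready" condition below are hypothetical.
 */
#if 0
struct example_obj {
	boolean_t ready;		/* hypothetical condition */
};

static void
example_wait_for_obj(struct example_obj *obj)
{
	wait_result_t wr;

	/* the event (the object's address) hashes to a global wait queue */
	assert_wait((event_t)obj, THREAD_UNINT);
	/* drop any lock protecting obj here, then block */
	wr = thread_block(THREAD_CONTINUE_NULL);
	assert(wr == THREAD_AWAKENED);
}

static void
example_post_obj(struct example_obj *obj)
{
	obj->ready = TRUE;
	thread_wakeup((event_t)obj);	/* wake all threads waiting on obj */
}
#endif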
127
128struct wait_queue boot_wait_queue[1];
129__private_extern__ struct wait_queue *wait_queues = &boot_wait_queue[0];
130__private_extern__ uint32_t num_wait_queues = 1;
131
132#define	P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align)))
133#define ROUNDDOWN(x,y)	(((x)/(y))*(y))
134
135static uint32_t
136compute_wait_hash_size(void)
137{
138	uint32_t hsize, queues;
139
140	if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize)))
141		return (hsize);
142
143	queues = thread_max / 11;
144	hsize = P2ROUNDUP(queues * sizeof(struct wait_queue), PAGE_SIZE);
145
146	return hsize;
147}
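
/*
 * Worked example of the sizing math above (illustrative numbers only):
 * P2ROUNDUP requires a power-of-two alignment; e.g. with a 4K page,
 * P2ROUNDUP(5000, 4096) == -(-5000 & -4096) == 8192.  So if a
 * hypothetical thread_max of 2560 gave queues = 2560 / 11 = 232, the
 * byte count 232 * sizeof(struct wait_queue) would be rounded up to a
 * whole number of pages here, then rounded back down to a multiple of
 * the queue size (ROUNDDOWN) and truncated to a power-of-two queue
 * count in wait_queues_init() below.
 */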
148
149static void
150wait_queues_init(void)
151{
152	uint32_t	i, whsize, qsz;
153	kern_return_t	kret;
154
155	/*
156	 * Determine the amount of memory we're willing to reserve for
157	 * the waitqueue hash table
158	 */
159	whsize = compute_wait_hash_size();
160
161	/* Determine the number of waitqueues we can fit. */
162	qsz = sizeof (struct wait_queue);
163	whsize = ROUNDDOWN(whsize, qsz);
164	num_wait_queues = whsize / qsz;
165
166	/*
167	 * The hash algorithm requires that this be a power of 2, so we
168	 * just mask off all the low-order bits.
169	 */
170	for (i = 0; i < 31; i++) {
171		uint32_t bit = (1 << i);
172		if ((num_wait_queues & bit) == num_wait_queues)
173			break;
174		num_wait_queues &= ~bit;
175	}
176	assert(num_wait_queues > 0);
177
178	/* Now determine how much memory we really need. */
179	whsize = P2ROUNDUP(num_wait_queues * qsz, PAGE_SIZE);
180
181	kret = kernel_memory_allocate(kernel_map, (vm_offset_t *) &wait_queues,
182	    whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
183
184	if (kret != KERN_SUCCESS || wait_queues == NULL)
185		panic("kernel_memory_allocate() failed to allocate wait queues, error: %d, whsize: 0x%x", kret, whsize);
186
187	for (i = 0; i < num_wait_queues; i++) {
188		wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO);
189	}
190}
191
192void
193wait_queue_bootstrap(void)
194{
195	wait_queues_init();
196	_wait_queue_zone = zinit(sizeof(struct wait_queue),
197				      WAIT_QUEUE_MAX * sizeof(struct wait_queue),
198				      sizeof(struct wait_queue),
199				      "wait queues");
200	zone_change(_wait_queue_zone, Z_NOENCRYPT, TRUE);
201
202	_wait_queue_set_zone = zinit(sizeof(struct wait_queue_set),
203				      WAIT_QUEUE_SET_MAX * sizeof(struct wait_queue_set),
204				      sizeof(struct wait_queue_set),
205				      "wait queue sets");
206	zone_change(_wait_queue_set_zone, Z_NOENCRYPT, TRUE);
207
208	_wait_queue_link_zone = zinit(sizeof(struct _wait_queue_link),
209				      WAIT_QUEUE_LINK_MAX * sizeof(struct _wait_queue_link),
210				      sizeof(struct _wait_queue_link),
211				      "wait queue links");
212	zone_change(_wait_queue_link_zone, Z_NOENCRYPT, TRUE);
213}
214
215/*
216 *	Routine:        wait_queue_init
217 *	Purpose:
218 *		Initialize a previously allocated wait queue.
219 *	Returns:
220 *		KERN_SUCCESS - The wait_queue_t was initialized
221 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
222 */
223kern_return_t
224wait_queue_init(
225	wait_queue_t wq,
226	int policy)
227{
228	/* only FIFO and LIFO for now */
229	if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0)
230		return KERN_INVALID_ARGUMENT;
231
232	wq->wq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0);
233	wq->wq_type = _WAIT_QUEUE_inited;
234	wq->wq_eventmask = 0;
235	queue_init(&wq->wq_queue);
236	hw_lock_init(&wq->wq_interlock);
237	return KERN_SUCCESS;
238}
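
/*
 * Illustrative sketch (not part of this file's build): a private wait
 * queue embedded in a larger synchronizer object and initialized in
 * place with wait_queue_init().  The owning structure is hypothetical.
 */
#if 0
struct example_sync_object {
	struct wait_queue	waiters;	/* embedded private wait queue */
	int			state;
};

static kern_return_t
example_sync_object_setup(struct example_sync_object *obj)
{
	obj->state = 0;
	/* FIFO wakeup order; SYNC_POLICY_REVERSED would select LIFO */
	return wait_queue_init(&obj->waiters, SYNC_POLICY_FIFO);
}
#endif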
239
240/*
241 *	Routine:		   wait_queue_alloc
242 *	Purpose:
 *		Allocate and initialize a wait queue for use outside of
 *		the mach part of the kernel.
245 *	Conditions:
246 *		Nothing locked - can block.
247 *	Returns:
248 *		The allocated and initialized wait queue
249 *		WAIT_QUEUE_NULL if there is a resource shortage
250 */
251wait_queue_t
252wait_queue_alloc(
253	int policy)
254{
255	wait_queue_t wq;
256	kern_return_t ret;
257
258	wq = (wait_queue_t) zalloc(_wait_queue_zone);
259	if (wq != WAIT_QUEUE_NULL) {
260		ret = wait_queue_init(wq, policy);
261		if (ret != KERN_SUCCESS) {
262			zfree(_wait_queue_zone, wq);
263			wq = WAIT_QUEUE_NULL;
264		}
265	}
266	return wq;
267}
268
269/*
270 *	Routine:        wait_queue_free
271 *	Purpose:
272 *		Free an allocated wait queue.
273 *	Conditions:
274 *		May block.
275 */
276kern_return_t
277wait_queue_free(
278	wait_queue_t wq)
279{
280	if (!wait_queue_is_queue(wq))
281		return KERN_INVALID_ARGUMENT;
282	if (!queue_empty(&wq->wq_queue))
283		return KERN_FAILURE;
284	zfree(_wait_queue_zone, wq);
285	return KERN_SUCCESS;
286}
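
/*
 * Illustrative sketch (not part of this file's build): pairing
 * wait_queue_alloc() with wait_queue_free() for a dynamically allocated
 * queue.  Names are hypothetical and error handling is abbreviated.
 */
#if 0
static wait_queue_t example_wq = WAIT_QUEUE_NULL;

static kern_return_t
example_wq_create(void)
{
	example_wq = wait_queue_alloc(SYNC_POLICY_FIFO);	/* may block */
	return (example_wq == WAIT_QUEUE_NULL) ?
		KERN_RESOURCE_SHORTAGE : KERN_SUCCESS;
}

static void
example_wq_destroy(void)
{
	/* returns KERN_FAILURE if threads are still queued on it */
	(void) wait_queue_free(example_wq);
	example_wq = WAIT_QUEUE_NULL;
}
#endif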
287
288/*
289 *	Routine:        wait_queue_set_init
290 *	Purpose:
291 *		Initialize a previously allocated wait queue set.
292 *	Returns:
293 *		KERN_SUCCESS - The wait_queue_set_t was initialized
294 *		KERN_INVALID_ARGUMENT - The policy parameter was invalid
295 */
296kern_return_t
297wait_queue_set_init(
298	wait_queue_set_t wqset,
299	int policy)
300{
301	kern_return_t ret;
302
303	ret = wait_queue_init(&wqset->wqs_wait_queue, policy);
304	if (ret != KERN_SUCCESS)
305		return ret;
306
307	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
308	if (policy & SYNC_POLICY_PREPOST)
309		wqset->wqs_wait_queue.wq_prepost = TRUE;
310	else
311		wqset->wqs_wait_queue.wq_prepost = FALSE;
312	queue_init(&wqset->wqs_setlinks);
313	queue_init(&wqset->wqs_preposts);
314	return KERN_SUCCESS;
315}
316
317
318kern_return_t
319wait_queue_sub_init(
320	wait_queue_set_t wqset,
321	int policy)
322{
323	return wait_queue_set_init(wqset, policy);
324}
325
326kern_return_t
327wait_queue_sub_clearrefs(
328        wait_queue_set_t wq_set)
329{
330	wait_queue_link_t wql;
331	queue_t q;
332	spl_t s;
333
334	if (!wait_queue_is_set(wq_set))
335		return KERN_INVALID_ARGUMENT;
336
337	s = splsched();
338	wqs_lock(wq_set);
339	q = &wq_set->wqs_preposts;
340	while (!queue_empty(q)) {
341		queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
342		assert(!wql_is_preposted(wql));
343	}
344	wqs_unlock(wq_set);
345	splx(s);
346	return KERN_SUCCESS;
347}
348
349/*
350 *	Routine:        wait_queue_set_alloc
351 *	Purpose:
352 *		Allocate and initialize a wait queue set for
353 *		use outside of the mach part of the kernel.
354 *	Conditions:
355 *		May block.
356 *	Returns:
357 *		The allocated and initialized wait queue set
358 *		WAIT_QUEUE_SET_NULL if there is a resource shortage
359 */
360wait_queue_set_t
361wait_queue_set_alloc(
362    int policy)
363{
364	wait_queue_set_t wq_set;
365
366	wq_set = (wait_queue_set_t) zalloc(_wait_queue_set_zone);
367	if (wq_set != WAIT_QUEUE_SET_NULL) {
368		kern_return_t ret;
369
370		ret = wait_queue_set_init(wq_set, policy);
371		if (ret != KERN_SUCCESS) {
372			zfree(_wait_queue_set_zone, wq_set);
373			wq_set = WAIT_QUEUE_SET_NULL;
374		}
375	}
376	return wq_set;
377}
378
379/*
380 *     Routine:        wait_queue_set_free
381 *     Purpose:
382 *             Free an allocated wait queue set
383 *     Conditions:
384 *             May block.
385 */
386kern_return_t
387wait_queue_set_free(
388	wait_queue_set_t wq_set)
389{
390	if (!wait_queue_is_set(wq_set))
391		return KERN_INVALID_ARGUMENT;
392
393	if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
394		return KERN_FAILURE;
395
396	zfree(_wait_queue_set_zone, wq_set);
397	return KERN_SUCCESS;
398}
399
400
401/*
402 *
403 *     Routine:        wait_queue_set_size
404 *     Routine:        wait_queue_link_size
405 *     Purpose:
406 *             Return the size of opaque wait queue structures
407 */
408unsigned int wait_queue_set_size(void) { return sizeof(WaitQueueSet); }
409unsigned int wait_queue_link_size(void) { return sizeof(WaitQueueLink); }
410
411/* declare a unique type for wait queue link structures */
412static unsigned int _wait_queue_link;
413static unsigned int _wait_queue_link_noalloc;
414static unsigned int _wait_queue_unlinked;
415
416#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
417#define WAIT_QUEUE_LINK_NOALLOC ((void *)&_wait_queue_link_noalloc)
418#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
419
420#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
421	WQASSERT(((wqe)->wqe_queue == (wq) && \
422	  queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
423	  "wait queue element list corruption: wq=%#x, wqe=%#x", \
424	  (wq), (wqe))
425
426#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
427			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
428			(queue_t)(wql) : &(wql)->wql_setlinks)))
429
430#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
431			((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
432			(queue_t)(wql) : &(wql)->wql_setlinks)))
433
434#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
435		WQASSERT(((((wql)->wql_type == WAIT_QUEUE_LINK) || \
436			   ((wql)->wql_type == WAIT_QUEUE_LINK_NOALLOC)) && \
437			((wql)->wql_setqueue == (wqs)) && \
438			(((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) || \
439			 ((wql)->wql_queue->wq_type == _WAIT_QUEUE_SET_inited)) && \
440			(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
441			"wait queue set links corruption: wqs=%#x, wql=%#x", \
442			 (wqs), (wql))
443
444#if defined(_WAIT_QUEUE_DEBUG_)
445
446#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
447
448#define WAIT_QUEUE_CHECK(wq) \
449MACRO_BEGIN \
450	queue_t q2 = &(wq)->wq_queue; \
451	wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
452	while (!queue_end(q2, (queue_entry_t)wqe2)) { \
453		WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
454		wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
455	} \
456MACRO_END
457
458#define WAIT_QUEUE_SET_CHECK(wqs) \
459MACRO_BEGIN \
460	queue_t q2 = &(wqs)->wqs_setlinks; \
461	wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
462	while (!queue_end(q2, (queue_entry_t)wql2)) { \
463		WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
464		wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
465	} \
466MACRO_END
467
468#else /* !_WAIT_QUEUE_DEBUG_ */
469
470#define WQASSERT(e, s, p0, p1) assert(e)
471
472#define WAIT_QUEUE_CHECK(wq)
473#define WAIT_QUEUE_SET_CHECK(wqs)
474
475#endif /* !_WAIT_QUEUE_DEBUG_ */
476
477/*
478 *	Routine:	wait_queue_global
479 *	Purpose:
480 *		Indicate if this wait queue is a global wait queue or not.
481 */
482static boolean_t
483wait_queue_global(
484	wait_queue_t wq)
485{
	if ((wq >= wait_queues) && (wq < (wait_queues + num_wait_queues))) {
487		return TRUE;
488	}
489	return FALSE;
490}
491
492
493/*
494 *	Routine:	wait_queue_member_locked
495 *	Purpose:
 *		Indicate if the wait queue is linked into the given set queue
497 *	Conditions:
498 *		The wait queue is locked
499 *		The set queue is just that, a set queue
500 */
501static boolean_t
502wait_queue_member_locked(
503	wait_queue_t wq,
504	wait_queue_set_t wq_set)
505{
506	wait_queue_element_t wq_element;
507	queue_t q;
508
509	assert(wait_queue_held(wq));
510	assert(wait_queue_is_set(wq_set));
511
512	q = &wq->wq_queue;
513
514	wq_element = (wait_queue_element_t) queue_first(q);
515	while (!queue_end(q, (queue_entry_t)wq_element)) {
516		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
517		if ((wq_element->wqe_type == WAIT_QUEUE_LINK) ||
518		    (wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC)) {
519			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
520
521			if (wql->wql_setqueue == wq_set)
522				return TRUE;
523		}
524		wq_element = (wait_queue_element_t)
525			     queue_next((queue_t) wq_element);
526	}
527	return FALSE;
528}
529
530
531/*
532 *	Routine:	wait_queue_member
533 *	Purpose:
 *		Indicate if the wait queue is linked into the given set queue
535 *	Conditions:
536 *		The set queue is just that, a set queue
537 */
538boolean_t
539wait_queue_member(
540	wait_queue_t wq,
541	wait_queue_set_t wq_set)
542{
543	boolean_t ret;
544	spl_t s;
545
546	if (!wait_queue_is_set(wq_set))
547		return FALSE;
548
549	s = splsched();
550	wait_queue_lock(wq);
551	ret = wait_queue_member_locked(wq, wq_set);
552	wait_queue_unlock(wq);
553	splx(s);
554
555	return ret;
556}
557
558
559/*
560 *	Routine:	wait_queue_link_internal
561 *	Purpose:
562 *		Insert a set wait queue into a wait queue.  This
563 *		requires us to link the two together using a wait_queue_link
564 *		structure that was provided.
565 *	Conditions:
566 *		The wait queue being inserted must be inited as a set queue
567 *		The wait_queue_link structure must already be properly typed
568 */
569static
570kern_return_t
571wait_queue_link_internal(
572	wait_queue_t wq,
573	wait_queue_set_t wq_set,
574	wait_queue_link_t wql)
575{
576	wait_queue_element_t wq_element;
577	queue_t q;
578	spl_t s;
579
580	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
581  		return KERN_INVALID_ARGUMENT;
582
583	/*
584	 * There are probably fewer threads and sets associated with
585	 * the wait queue than there are wait queues associated with
586	 * the set.  So let's validate it that way.
587	 */
588	s = splsched();
589	wait_queue_lock(wq);
590	q = &wq->wq_queue;
591	wq_element = (wait_queue_element_t) queue_first(q);
592	while (!queue_end(q, (queue_entry_t)wq_element)) {
593		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
594		if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
595		     wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
596		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
597			wait_queue_unlock(wq);
598			splx(s);
599			return KERN_ALREADY_IN_SET;
600		}
601		wq_element = (wait_queue_element_t)
602				queue_next((queue_t) wq_element);
603	}
604
605	/*
606	 * Not already a member, so we can add it.
607	 */
608	wqs_lock(wq_set);
609
610	WAIT_QUEUE_SET_CHECK(wq_set);
611
612	assert(wql->wql_type == WAIT_QUEUE_LINK ||
613	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);
614
615	wql->wql_queue = wq;
616	wql_clear_prepost(wql);
617	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
618	wql->wql_setqueue = wq_set;
619	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
620
621	wqs_unlock(wq_set);
622	wait_queue_unlock(wq);
623	splx(s);
624
625	return KERN_SUCCESS;
626}
627
628/*
629 *	Routine:	wait_queue_link_noalloc
630 *	Purpose:
631 *		Insert a set wait queue into a wait queue.  This
632 *		requires us to link the two together using a wait_queue_link
 *		structure provided by the caller.
634 *	Conditions:
635 *		The wait queue being inserted must be inited as a set queue
636 */
637kern_return_t
638wait_queue_link_noalloc(
639	wait_queue_t wq,
640	wait_queue_set_t wq_set,
641	wait_queue_link_t wql)
642{
643	wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
644	return wait_queue_link_internal(wq, wq_set, wql);
645}
646
647/*
648 *	Routine:	wait_queue_link
649 *	Purpose:
650 *		Insert a set wait queue into a wait queue.  This
651 *		requires us to link the two together using a wait_queue_link
652 *		structure that we allocate.
653 *	Conditions:
654 *		The wait queue being inserted must be inited as a set queue
655 */
656kern_return_t
657wait_queue_link(
658	wait_queue_t wq,
659	wait_queue_set_t wq_set)
660{
661	wait_queue_link_t wql;
662	kern_return_t ret;
663
664	wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
665	if (wql == WAIT_QUEUE_LINK_NULL)
666		return KERN_RESOURCE_SHORTAGE;
667
668	wql->wql_type = WAIT_QUEUE_LINK;
669	ret = wait_queue_link_internal(wq, wq_set, wql);
670	if (ret != KERN_SUCCESS)
671		zfree(_wait_queue_link_zone, wql);
672
673	return ret;
674}
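
/*
 * Illustrative sketch (not part of this file's build): linking a wait
 * queue into a wait queue set with an internally allocated link, then
 * dissolving the association again.  The function is hypothetical.
 */
#if 0
static kern_return_t
example_link_queue_to_set(wait_queue_t wq, wait_queue_set_t wq_set)
{
	kern_return_t kr;

	kr = wait_queue_link(wq, wq_set);	/* allocates the link; may block */
	if (kr != KERN_SUCCESS)
		return kr;	/* e.g. KERN_ALREADY_IN_SET or KERN_RESOURCE_SHORTAGE */

	assert(wait_queue_member(wq, wq_set));

	/* undo the linkage; the internally allocated link is freed for us */
	return wait_queue_unlink(wq, wq_set);
}
#endif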
675
676wait_queue_link_t
677wait_queue_link_allocate(void)
678{
679	wait_queue_link_t wql;
680
681	wql = zalloc(_wait_queue_link_zone); /* Can't fail */
682	bzero(wql, sizeof(*wql));
683	wql->wql_type = WAIT_QUEUE_UNLINKED;
684
685	return wql;
686}
687
688kern_return_t
689wait_queue_link_free(wait_queue_link_t wql)
690{
691	zfree(_wait_queue_link_zone, wql);
692	return KERN_SUCCESS;
693}
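
/*
 * Illustrative sketch (not part of this file's build): the pre-allocated
 * link pattern.  The link is allocated up front (where blocking is
 * acceptable), installed with the _noalloc variant, recovered with the
 * _nofree variant and freed later.  The function is hypothetical.
 */
#if 0
static kern_return_t
example_link_with_prealloc(wait_queue_t wq, wait_queue_set_t wq_set)
{
	wait_queue_link_t wql, unlinked;
	kern_return_t kr;

	wql = wait_queue_link_allocate();		/* may block */
	kr = wait_queue_link_noalloc(wq, wq_set, wql);	/* no allocation here */
	if (kr != KERN_SUCCESS) {
		wait_queue_link_free(wql);
		return kr;
	}

	/* ... later ... */
	kr = wait_queue_unlink_nofree(wq, wq_set, &unlinked);
	if (kr == KERN_SUCCESS)
		wait_queue_link_free(unlinked);		/* may block */
	return kr;
}
#endif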
694
695
696/*
697 *	Routine:	wait_queue_unlink_locked
698 *	Purpose:
699 *		Undo the linkage between a wait queue and a set.
700 */
701static void
702wait_queue_unlink_locked(
703	wait_queue_t wq,
704	wait_queue_set_t wq_set,
705	wait_queue_link_t wql)
706{
707	assert(wait_queue_held(wq));
708	assert(wait_queue_held(&wq_set->wqs_wait_queue));
709
710	wql->wql_queue = WAIT_QUEUE_NULL;
711	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
712	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
713	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
714	if (wql_is_preposted(wql)) {
715		queue_t ppq = &wq_set->wqs_preposts;
716		queue_remove(ppq, wql, wait_queue_link_t, wql_preposts);
717	}
718	wql->wql_type = WAIT_QUEUE_UNLINKED;
719
720	WAIT_QUEUE_CHECK(wq);
721	WAIT_QUEUE_SET_CHECK(wq_set);
722}
723
724/*
725 *	Routine:	wait_queue_unlink_nofree
726 *	Purpose:
727 *		Remove the linkage between a wait queue and a set,
728 *		returning the linkage structure to the caller to
729 *		free later.
730 *	Conditions:
 *		The wait queue being unlinked must be a member of the set queue
732 */
733kern_return_t
734wait_queue_unlink_nofree(
735	wait_queue_t wq,
736	wait_queue_set_t wq_set,
737	wait_queue_link_t *wqlp)
738{
739	wait_queue_element_t wq_element;
740	wait_queue_link_t wql;
741	queue_t q;
742	spl_t s;
743
744	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
745		return KERN_INVALID_ARGUMENT;
746	}
747	s = splsched();
748	wait_queue_lock(wq);
749
750	q = &wq->wq_queue;
751	wq_element = (wait_queue_element_t) queue_first(q);
752	while (!queue_end(q, (queue_entry_t)wq_element)) {
753		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
754		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
755		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
756
757		   	wql = (wait_queue_link_t)wq_element;
758
759			if (wql->wql_setqueue == wq_set) {
760
761				wqs_lock(wq_set);
762				wait_queue_unlink_locked(wq, wq_set, wql);
763				wqs_unlock(wq_set);
764				wait_queue_unlock(wq);
765				splx(s);
766				*wqlp = wql;
767				return KERN_SUCCESS;
768			}
769		}
770		wq_element = (wait_queue_element_t)
771				queue_next((queue_t) wq_element);
772	}
773	wait_queue_unlock(wq);
774	splx(s);
775	return KERN_NOT_IN_SET;
776}
777
778/*
779 *	Routine:	wait_queue_unlink
780 *	Purpose:
781 *		Remove the linkage between a wait queue and a set,
782 *		freeing the linkage structure.
783 *	Conditions:
 *		The wait queue being unlinked must be a member of the set queue
785 */
786kern_return_t
787wait_queue_unlink(
788	wait_queue_t wq,
789	wait_queue_set_t wq_set)
790{
791	wait_queue_element_t wq_element;
792	wait_queue_link_t wql;
793	queue_t q;
794	spl_t s;
795
796	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
797		return KERN_INVALID_ARGUMENT;
798	}
799	s = splsched();
800	wait_queue_lock(wq);
801
802	q = &wq->wq_queue;
803	wq_element = (wait_queue_element_t) queue_first(q);
804	while (!queue_end(q, (queue_entry_t)wq_element)) {
805		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
806		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
807		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
808
809		   	wql = (wait_queue_link_t)wq_element;
810
811			if (wql->wql_setqueue == wq_set) {
812				boolean_t alloced;
813
814				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
815				wqs_lock(wq_set);
816				wait_queue_unlink_locked(wq, wq_set, wql);
817				wqs_unlock(wq_set);
818				wait_queue_unlock(wq);
819				splx(s);
820				if (alloced)
821					zfree(_wait_queue_link_zone, wql);
822				return KERN_SUCCESS;
823			}
824		}
825		wq_element = (wait_queue_element_t)
826				queue_next((queue_t) wq_element);
827	}
828	wait_queue_unlock(wq);
829	splx(s);
830	return KERN_NOT_IN_SET;
831}
832
833/*
834 *	Routine:	wait_queue_unlink_all_nofree_locked
835 *	Purpose:
836 *		Remove the linkage between a wait queue and all its sets.
837 *		All the linkage structures are returned to the caller for
838 *		later freeing.
839 *	Conditions:
840 *		Wait queue locked.
841 */
842
843static void
844wait_queue_unlink_all_nofree_locked(
845	wait_queue_t wq,
846	queue_t links)
847{
848	wait_queue_element_t wq_element;
849	wait_queue_element_t wq_next_element;
850	wait_queue_set_t wq_set;
851	wait_queue_link_t wql;
852	queue_t q;
853
854	q = &wq->wq_queue;
855
856	wq_element = (wait_queue_element_t) queue_first(q);
857	while (!queue_end(q, (queue_entry_t)wq_element)) {
858
859		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
860		wq_next_element = (wait_queue_element_t)
861			     queue_next((queue_t) wq_element);
862
863		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
864		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
865			wql = (wait_queue_link_t)wq_element;
866			wq_set = wql->wql_setqueue;
867			wqs_lock(wq_set);
868			wait_queue_unlink_locked(wq, wq_set, wql);
869			wqs_unlock(wq_set);
870			enqueue(links, &wql->wql_links);
871		}
872		wq_element = wq_next_element;
873	}
874}
875
876/*
877 *	Routine:	wait_queue_unlink_all_nofree
878 *	Purpose:
879 *		Remove the linkage between a wait queue and all its sets.
880 *		All the linkage structures are returned to the caller for
881 *		later freeing.
882 *	Conditions:
883 *		Nothing of interest locked.
884 */
885
886kern_return_t
887wait_queue_unlink_all_nofree(
888	wait_queue_t wq,
889	queue_t links)
890{
891	spl_t s;
892
893	if (!wait_queue_is_valid(wq)) {
894		return KERN_INVALID_ARGUMENT;
895	}
896
897	s = splsched();
898	wait_queue_lock(wq);
899	wait_queue_unlink_all_nofree_locked(wq, links);
900	wait_queue_unlock(wq);
901	splx(s);
902
903	return(KERN_SUCCESS);
904}
905
906/*
907 *	Routine:	wait_queue_unlink_all_locked
908 *	Purpose:
909 *		Remove the linkage between a locked wait queue and all its
910 *		sets and enqueue the allocated ones onto the links queue
911 *		provided.
912 *	Conditions:
913 *		Wait queue locked.
914 */
915static void
916wait_queue_unlink_all_locked(
917	wait_queue_t wq,
918	queue_t links)
919{
920	wait_queue_element_t wq_element;
921	wait_queue_element_t wq_next_element;
922	wait_queue_set_t wq_set;
923	wait_queue_link_t wql;
924	queue_t q;
925
926	q = &wq->wq_queue;
927
928	wq_element = (wait_queue_element_t) queue_first(q);
929	while (!queue_end(q, (queue_entry_t)wq_element)) {
930		boolean_t alloced;
931
932		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
933		wq_next_element = (wait_queue_element_t)
934			     queue_next((queue_t) wq_element);
935
936		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
937		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
938			wql = (wait_queue_link_t)wq_element;
939			wq_set = wql->wql_setqueue;
940			wqs_lock(wq_set);
941			wait_queue_unlink_locked(wq, wq_set, wql);
942			wqs_unlock(wq_set);
943			if (alloced)
944				enqueue(links, &wql->wql_links);
945		}
946		wq_element = wq_next_element;
947	}
948
949}
950
951
952/*
953 *	Routine:	wait_queue_unlink_all
954 *	Purpose:
955 *		Remove the linkage between a wait queue and all its sets.
956 *		All the linkage structures that were allocated internally
957 *		are freed.  The others are the caller's responsibility.
958 *	Conditions:
959 *		Nothing of interest locked.
960 */
961
962kern_return_t
963wait_queue_unlink_all(
964	wait_queue_t wq)
965{
966	wait_queue_link_t wql;
967	queue_head_t links_queue_head;
968	queue_t links = &links_queue_head;
969	spl_t s;
970
971	if (!wait_queue_is_valid(wq)) {
972		return KERN_INVALID_ARGUMENT;
973	}
974
975	queue_init(links);
976
977	s = splsched();
978	wait_queue_lock(wq);
979	wait_queue_unlink_all_locked(wq, links);
980	wait_queue_unlock(wq);
981	splx(s);
982
983	while(!queue_empty(links)) {
984		wql = (wait_queue_link_t) dequeue(links);
985		zfree(_wait_queue_link_zone, wql);
986	}
987
988	return(KERN_SUCCESS);
989}
990
991/* legacy interface naming */
992kern_return_t
993wait_subqueue_unlink_all(
994	wait_queue_set_t	wq_set)
995{
996	return wait_queue_set_unlink_all(wq_set);
997}
998
999
1000/*
1001 *	Routine:	wait_queue_set_unlink_all_nofree
1002 *	Purpose:
1003 *		Remove the linkage between a set wait queue and all its
1004 *		member wait queues and all the sets it may be a member of.
1005 *		The links structures are returned for later freeing by the
1006 *		caller.
1007 *	Conditions:
1008 *		The wait queue must be a set
1009 */
1010kern_return_t
1011wait_queue_set_unlink_all_nofree(
1012	wait_queue_set_t wq_set,
1013	queue_t		links)
1014{
1015	wait_queue_link_t wql;
1016	wait_queue_t wq;
1017	queue_t q;
1018	spl_t s;
1019
1020	if (!wait_queue_is_set(wq_set)) {
1021		return KERN_INVALID_ARGUMENT;
1022	}
1023
1024retry:
1025	s = splsched();
1026	wqs_lock(wq_set);
1027
1028	/* remove the wait queues that are members of our set */
1029	q = &wq_set->wqs_setlinks;
1030
1031	wql = (wait_queue_link_t)queue_first(q);
1032	while (!queue_end(q, (queue_entry_t)wql)) {
1033		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
1034		wq = wql->wql_queue;
1035		if (wait_queue_lock_try(wq)) {
1036			wait_queue_unlink_locked(wq, wq_set, wql);
1037			wait_queue_unlock(wq);
1038			enqueue(links, &wql->wql_links);
1039			wql = (wait_queue_link_t)queue_first(q);
1040		} else {
1041			wqs_unlock(wq_set);
1042			splx(s);
1043			delay(1);
1044			goto retry;
1045		}
1046	}
1047
1048	/* remove this set from sets it belongs to */
1049	wait_queue_unlink_all_nofree_locked(&wq_set->wqs_wait_queue, links);
1050
1051	wqs_unlock(wq_set);
1052	splx(s);
1053
1054	return(KERN_SUCCESS);
1055}
1056
1057/*
1058 *	Routine:	wait_queue_set_unlink_all
1059 *	Purpose:
1060 *		Remove the linkage between a set wait queue and all its
1061 *		member wait queues and all the sets it may be members of.
 *		The link structures are freed for those links which were
1063 *		dynamically allocated.
1064 *	Conditions:
1065 *		The wait queue must be a set
1066 */
1067kern_return_t
1068wait_queue_set_unlink_all(
1069	wait_queue_set_t wq_set)
1070{
1071	wait_queue_link_t wql;
1072	wait_queue_t wq;
1073	queue_t q;
1074	queue_head_t links_queue_head;
1075	queue_t links = &links_queue_head;
1076	spl_t s;
1077
1078	if (!wait_queue_is_set(wq_set)) {
1079		return KERN_INVALID_ARGUMENT;
1080	}
1081
1082	queue_init(links);
1083
1084retry:
1085	s = splsched();
1086	wqs_lock(wq_set);
1087
1088	/* remove the wait queues that are members of our set */
1089	q = &wq_set->wqs_setlinks;
1090
1091	wql = (wait_queue_link_t)queue_first(q);
1092	while (!queue_end(q, (queue_entry_t)wql)) {
1093		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
1094		wq = wql->wql_queue;
1095		if (wait_queue_lock_try(wq)) {
1096			boolean_t alloced;
1097
1098			alloced = (wql->wql_type == WAIT_QUEUE_LINK);
1099			wait_queue_unlink_locked(wq, wq_set, wql);
1100			wait_queue_unlock(wq);
1101			if (alloced)
1102				enqueue(links, &wql->wql_links);
1103			wql = (wait_queue_link_t)queue_first(q);
1104		} else {
1105			wqs_unlock(wq_set);
1106			splx(s);
1107			delay(1);
1108			goto retry;
1109		}
1110	}
1111
1112
1113	/* remove this set from sets it belongs to */
1114	wait_queue_unlink_all_locked(&wq_set->wqs_wait_queue, links);
1115
1116	wqs_unlock(wq_set);
1117	splx(s);
1118
1119	while (!queue_empty (links)) {
1120		wql = (wait_queue_link_t) dequeue(links);
1121		zfree(_wait_queue_link_zone, wql);
1122	}
1123	return(KERN_SUCCESS);
1124}
1125
1126kern_return_t
1127wait_queue_set_unlink_one(
1128	wait_queue_set_t wq_set,
1129	wait_queue_link_t wql)
1130{
1131	wait_queue_t wq;
1132	spl_t s;
1133
1134	assert(wait_queue_is_set(wq_set));
1135
1136retry:
1137	s = splsched();
1138	wqs_lock(wq_set);
1139
1140	WAIT_QUEUE_SET_CHECK(wq_set);
1141
1142	/* Already unlinked, e.g. by selclearthread() */
1143	if (wql->wql_type == WAIT_QUEUE_UNLINKED) {
1144		goto out;
1145	}
1146
1147	WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
1148
1149	/* On a wait queue, and we hold set queue lock ... */
1150	wq = wql->wql_queue;
1151	if (wait_queue_lock_try(wq)) {
1152		wait_queue_unlink_locked(wq, wq_set, wql);
1153		wait_queue_unlock(wq);
1154	} else {
1155		wqs_unlock(wq_set);
1156		splx(s);
1157		delay(1);
1158		goto retry;
1159	}
1160
1161out:
1162	wqs_unlock(wq_set);
1163	splx(s);
1164
1165	return KERN_SUCCESS;
1166}
1167
1168/*
1169 *	Routine:	wait_queue_assert_wait64_locked
1170 *	Purpose:
1171 *		Insert the current thread into the supplied wait queue
1172 *		waiting for a particular event to be posted to that queue.
1173 *
1174 *	Conditions:
1175 *		The wait queue is assumed locked.
1176 *		The waiting thread is assumed locked.
1177 *
1178 */
1179__private_extern__ wait_result_t
1180wait_queue_assert_wait64_locked(
1181	wait_queue_t wq,
1182	event64_t event,
1183	wait_interrupt_t interruptible,
1184	wait_timeout_urgency_t urgency,
1185	uint64_t deadline,
1186	uint64_t leeway,
1187	thread_t thread)
1188{
1189	wait_result_t wait_result;
1190	boolean_t realtime;
1191
1192	if (!wait_queue_assert_possible(thread))
1193		panic("wait_queue_assert_wait64_locked");
1194
1195	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
1196		wait_queue_set_t wqs = (wait_queue_set_t)wq;
1197
1198		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
1199			return(THREAD_AWAKENED);
1200	}
1201
1202	/*
1203	 * Realtime threads get priority for wait queue placements.
1204	 * This allows wait_queue_wakeup_one to prefer a waiting
1205	 * realtime thread, similar in principle to performing
1206	 * a wait_queue_wakeup_all and allowing scheduler prioritization
1207	 * to run the realtime thread, but without causing the
1208	 * lock contention of that scenario.
1209	 */
1210	realtime = (thread->sched_pri >= BASEPRI_REALTIME);
1211
1212	/*
1213	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
1215	 * the front of the queue.  Later, these queues will honor the policy
1216	 * value set at wait_queue_init time.
1217	 */
1218	wait_result = thread_mark_wait_locked(thread, interruptible);
1219	if (wait_result == THREAD_WAITING) {
1220		if (!wq->wq_fifo
1221			|| (thread->options & TH_OPT_VMPRIV)
1222			|| realtime)
1223			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
1224		else
1225			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
1226
1227		thread->wait_event = event;
1228		thread->wait_queue = wq;
1229
1230		if (deadline != 0) {
1231
1232			if (!timer_call_enter_with_leeway(&thread->wait_timer, NULL,
1233				deadline, leeway, urgency, FALSE))
1234				thread->wait_timer_active++;
1235			thread->wait_timer_is_set = TRUE;
1236		}
1237		if (wait_queue_global(wq)) {
1238			wq->wq_eventmask = wq->wq_eventmask | CAST_TO_EVENT_MASK(event);
1239		}
1240
1241	}
1242	return(wait_result);
1243}
1244
1245/*
1246 *	Routine:	wait_queue_assert_wait
1247 *	Purpose:
1248 *		Insert the current thread into the supplied wait queue
1249 *		waiting for a particular event to be posted to that queue.
1250 *
1251 *	Conditions:
1252 *		nothing of interest locked.
1253 */
1254wait_result_t
1255wait_queue_assert_wait(
1256	wait_queue_t wq,
1257	event_t event,
1258	wait_interrupt_t interruptible,
1259	uint64_t deadline)
1260{
1261	spl_t s;
1262	wait_result_t ret;
1263	thread_t thread = current_thread();
1264
1265	/* If it is an invalid wait queue, you can't wait on it */
1266	if (!wait_queue_is_valid(wq))
1267		return (thread->wait_result = THREAD_RESTART);
1268
1269	s = splsched();
1270	wait_queue_lock(wq);
1271	thread_lock(thread);
1272	ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
1273					      interruptible,
1274					      TIMEOUT_URGENCY_SYS_NORMAL,
1275					      deadline, 0,
1276					      thread);
1277	thread_unlock(thread);
1278	wait_queue_unlock(wq);
1279	splx(s);
1280	return(ret);
1281}
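
/*
 * Illustrative sketch (not part of this file's build): waiting on a
 * private wait queue with a deadline via wait_queue_assert_wait(), and
 * the matching wakeup.  The queue, event and one-second timeout are
 * hypothetical.
 */
#if 0
static wait_result_t
example_wait_with_timeout(wait_queue_t wq, void *event_obj)
{
	uint64_t deadline;
	wait_result_t wr;

	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	wr = wait_queue_assert_wait(wq, (event_t)event_obj,
					THREAD_UNINT, deadline);
	if (wr == THREAD_WAITING)
		wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;	/* THREAD_AWAKENED or THREAD_TIMED_OUT */
}

static void
example_wake_waiters(wait_queue_t wq, void *event_obj)
{
	(void) wait_queue_wakeup_all(wq, (event_t)event_obj, THREAD_AWAKENED);
}
#endif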
1282
1283/*
1284 *	Routine:	wait_queue_assert_wait_with_leeway
1285 *	Purpose:
1286 *		Insert the current thread into the supplied wait queue
1287 *		waiting for a particular event to be posted to that queue.
1288 *		Deadline values are specified with urgency and leeway.
1289 *
1290 *	Conditions:
1291 *		nothing of interest locked.
1292 */
1293wait_result_t
1294wait_queue_assert_wait_with_leeway(
1295	wait_queue_t wq,
1296	event_t event,
1297	wait_interrupt_t interruptible,
1298	wait_timeout_urgency_t urgency,
1299	uint64_t deadline,
1300	uint64_t leeway)
1301{
1302	spl_t s;
1303	wait_result_t ret;
1304	thread_t thread = current_thread();
1305
1306	/* If it is an invalid wait queue, you can't wait on it */
1307	if (!wait_queue_is_valid(wq))
1308		return (thread->wait_result = THREAD_RESTART);
1309
1310	s = splsched();
1311	wait_queue_lock(wq);
1312	thread_lock(thread);
1313	ret = wait_queue_assert_wait64_locked(wq, CAST_DOWN(event64_t,event),
1314					      interruptible,
1315					      urgency, deadline, leeway,
1316					      thread);
1317	thread_unlock(thread);
1318	wait_queue_unlock(wq);
1319	splx(s);
1320	return(ret);
1321}
1322
1323/*
1324 *	Routine:	wait_queue_assert_wait64
1325 *	Purpose:
1326 *		Insert the current thread into the supplied wait queue
1327 *		waiting for a particular event to be posted to that queue.
1328 *	Conditions:
1329 *		nothing of interest locked.
1330 */
1331wait_result_t
1332wait_queue_assert_wait64(
1333	wait_queue_t wq,
1334	event64_t event,
1335	wait_interrupt_t interruptible,
1336	uint64_t deadline)
1337{
1338	spl_t s;
1339	wait_result_t ret;
1340	thread_t thread = current_thread();
1341
	/* If it is an invalid wait queue, you can't wait on it */
1343	if (!wait_queue_is_valid(wq))
1344		return (thread->wait_result = THREAD_RESTART);
1345
1346	s = splsched();
1347	wait_queue_lock(wq);
1348	thread_lock(thread);
1349	ret = wait_queue_assert_wait64_locked(wq, event, interruptible,
1350					      TIMEOUT_URGENCY_SYS_NORMAL,
1351					      deadline, 0,
1352					      thread);
1353	thread_unlock(thread);
1354	wait_queue_unlock(wq);
1355	splx(s);
1356	return(ret);
1357}
1358
1359/*
1360 *	Routine:	wait_queue_assert_wait64_with_leeway
1361 *	Purpose:
1362 *		Insert the current thread into the supplied wait queue
1363 *		waiting for a particular event to be posted to that queue.
1364 *		Deadline values are specified with urgency and leeway.
1365 *	Conditions:
1366 *		nothing of interest locked.
1367 */
1368wait_result_t
1369wait_queue_assert_wait64_with_leeway(
1370	wait_queue_t wq,
1371	event64_t event,
1372	wait_interrupt_t interruptible,
1373	wait_timeout_urgency_t urgency,
1374	uint64_t deadline,
1375	uint64_t leeway)
1376{
1377	spl_t s;
1378	wait_result_t ret;
1379	thread_t thread = current_thread();
1380
	/* If it is an invalid wait queue, you can't wait on it */
1382	if (!wait_queue_is_valid(wq))
1383		return (thread->wait_result = THREAD_RESTART);
1384
1385	s = splsched();
1386	wait_queue_lock(wq);
1387	thread_lock(thread);
1388	ret = wait_queue_assert_wait64_locked(wq, event, interruptible,
1389					      urgency, deadline, leeway,
1390					      thread);
1391	thread_unlock(thread);
1392	wait_queue_unlock(wq);
1393	splx(s);
1394	return(ret);
1395}
1396
1397/*
1398 *	Routine:	_wait_queue_select64_all
1399 *	Purpose:
1400 *		Select all threads off a wait queue that meet the
1401 *		supplied criteria.
1402 *	Conditions:
1403 *		at splsched
1404 *		wait queue locked
1405 *		wake_queue initialized and ready for insertion
1406 *		possibly recursive
1407 *	Returns:
 *		the selected threads, locked, on the supplied wake_queue
1409 */
1410static void
1411_wait_queue_select64_all(
1412	wait_queue_t wq,
1413	event64_t event,
1414	queue_t wake_queue)
1415{
1416	wait_queue_element_t wq_element;
1417	wait_queue_element_t wqe_next;
1418	unsigned long eventmask = 0;
1419	boolean_t is_queue_global = FALSE;
1420	queue_t q;
1421
1422	is_queue_global = wait_queue_global(wq);
1423	if (is_queue_global) {
1424		eventmask = CAST_TO_EVENT_MASK(event);
1425		if ((wq->wq_eventmask & eventmask) != eventmask) {
1426			return;
1427		}
1428		eventmask = 0;
1429	}
1430	q = &wq->wq_queue;
1431
1432	wq_element = (wait_queue_element_t) queue_first(q);
1433	while (!queue_end(q, (queue_entry_t)wq_element)) {
1434		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1435		wqe_next = (wait_queue_element_t)
1436			   queue_next((queue_t) wq_element);
1437
1438		/*
1439		 * We may have to recurse if this is a compound wait queue.
1440		 */
1441		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1442		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1443			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1444			wait_queue_set_t set_queue = wql->wql_setqueue;
1445
1446			/*
1447			 * We have to check the set wait queue. If it is marked
1448			 * as pre-post, and it is the "generic event" then mark
1449			 * it pre-posted now (if not already).
1450			 */
1451			wqs_lock(set_queue);
1452			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
1453				queue_t ppq = &set_queue->wqs_preposts;
1454				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
1455			}
1456			if (! wait_queue_empty(&set_queue->wqs_wait_queue))
1457				_wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
1458			wqs_unlock(set_queue);
1459		} else {
1460
1461			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
1465			 */
1466			thread_t t = (thread_t)(void *)wq_element;
1467
1468			if (t->wait_event == event) {
1469				thread_lock(t);
1470				remqueue((queue_entry_t) t);
1471				enqueue (wake_queue, (queue_entry_t) t);
1472				t->wait_queue = WAIT_QUEUE_NULL;
1473				t->wait_event = NO_EVENT64;
1474				t->at_safe_point = FALSE;
1475				/* returned locked */
1476			} else {
1477				if (is_queue_global) {
1478					eventmask = eventmask |
1479						CAST_TO_EVENT_MASK(t->wait_event);
1480				}
1481			}
1482		}
1483		wq_element = wqe_next;
1484	}
1485	/* Update event mask if global wait queue */
1486	if (is_queue_global) {
1487		wq->wq_eventmask = eventmask;
1488	}
1489
1490}
1491
1492/*
1493 *	Routine:        wait_queue_wakeup64_all_locked
1494 *	Purpose:
1495 *		Wakeup some number of threads that are in the specified
1496 *		wait queue and waiting on the specified event.
1497 *	Conditions:
1498 *		wait queue already locked (may be released).
1499 *	Returns:
1500 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1502 */
1503__private_extern__ kern_return_t
1504wait_queue_wakeup64_all_locked(
1505	wait_queue_t wq,
1506	event64_t event,
1507	wait_result_t result,
1508	boolean_t unlock)
1509{
1510	queue_head_t wake_queue_head;
1511	queue_t q = &wake_queue_head;
1512	kern_return_t res;
1513
1514//	assert(wait_queue_held(wq));
1515//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
1516//		panic("wait_queue_wakeup64_all_locked: lock not held on %p\n", wq);	/* (BRINGUP) */
1517//	}
1518
1519	queue_init(q);
1520
1521	/*
1522	 * Select the threads that we will wake up.	 The threads
1523	 * are returned to us locked and cleanly removed from the
1524	 * wait queue.
1525	 */
1526	_wait_queue_select64_all(wq, event, q);
1527	if (unlock)
1528		wait_queue_unlock(wq);
1529
1530	/*
1531	 * For each thread, set it running.
1532	 */
1533	res = KERN_NOT_WAITING;
1534	while (!queue_empty (q)) {
1535		thread_t thread = (thread_t)(void *) dequeue(q);
1536		res = thread_go(thread, result);
1537		assert(res == KERN_SUCCESS);
1538		thread_unlock(thread);
1539	}
1540	return res;
1541}
1542
1543
1544/*
1545 *	Routine:		wait_queue_wakeup_all
1546 *	Purpose:
1547 *		Wakeup some number of threads that are in the specified
1548 *		wait queue and waiting on the specified event.
1549 *	Conditions:
1550 *		Nothing locked
1551 *	Returns:
1552 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1554 */
1555kern_return_t
1556wait_queue_wakeup_all(
1557	wait_queue_t wq,
1558	event_t event,
1559	wait_result_t result)
1560{
1561	kern_return_t ret;
1562	spl_t s;
1563
1564	if (!wait_queue_is_valid(wq)) {
1565		return KERN_INVALID_ARGUMENT;
1566	}
1567
1568	s = splsched();
1569	wait_queue_lock(wq);
1570//	if(!wq->wq_interlock.lock_data) {		/* (BRINGUP */
1571//		panic("wait_queue_wakeup_all: we did not get the lock on %p\n", wq);	/* (BRINGUP) */
1572//	}
1573	ret = wait_queue_wakeup64_all_locked(
1574				wq, CAST_DOWN(event64_t,event),
1575				result, TRUE);
1576	/* lock released */
1577	splx(s);
1578	return ret;
1579}
1580
1581/*
1582 *	Routine:		wait_queue_wakeup64_all
1583 *	Purpose:
1584 *		Wakeup some number of threads that are in the specified
1585 *		wait queue and waiting on the specified event.
1586 *	Conditions:
1587 *		Nothing locked
1588 *	Returns:
1589 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
1591 */
1592kern_return_t
1593wait_queue_wakeup64_all(
1594	wait_queue_t wq,
1595	event64_t event,
1596	wait_result_t result)
1597{
1598	kern_return_t ret;
1599	spl_t s;
1600
1601	if (!wait_queue_is_valid(wq)) {
1602		return KERN_INVALID_ARGUMENT;
1603	}
1604
1605	s = splsched();
1606	wait_queue_lock(wq);
1607	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
1608	/* lock released */
1609	splx(s);
1610	return ret;
1611}
1612
1613/*
1614 *	Routine:	_wait_queue_select64_one
1615 *	Purpose:
 *		Select the best thread off a wait queue that meets the
1617 *		supplied criteria.
1618 * 	Conditions:
1619 *		at splsched
1620 *		wait queue locked
1621 *		possibly recursive
1622 * 	Returns:
1623 *		a locked thread - if one found
1624 *	Note:
1625 *		This is where the sync policy of the wait queue comes
1626 *		into effect.  For now, we just assume FIFO/LIFO.
1627 */
1628static thread_t
1629_wait_queue_select64_one(
1630	wait_queue_t wq,
1631	event64_t event)
1632{
1633	wait_queue_element_t wq_element;
1634	wait_queue_element_t wqe_next;
1635	thread_t t = THREAD_NULL;
1636	thread_t fifo_thread = THREAD_NULL;
1637	boolean_t is_queue_fifo = TRUE;
1638	boolean_t is_queue_global = FALSE;
1639	boolean_t thread_imp_donor = FALSE;
1640	boolean_t realtime = FALSE;
1641	unsigned long eventmask = 0;
1642	queue_t q;
1643
1644	if (wait_queue_global(wq)) {
1645		eventmask = CAST_TO_EVENT_MASK(event);
1646		if ((wq->wq_eventmask & eventmask) != eventmask) {
1647			return THREAD_NULL;
1648		}
1649		eventmask = 0;
1650		is_queue_global = TRUE;
1651#if IMPORTANCE_INHERITANCE
1652		is_queue_fifo = FALSE;
1653#endif /* IMPORTANCE_INHERITANCE */
1654	}
1655
1656	q = &wq->wq_queue;
1657
1658	wq_element = (wait_queue_element_t) queue_first(q);
1659	while (!queue_end(q, (queue_entry_t)wq_element)) {
1660		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1661		wqe_next = (wait_queue_element_t)
1662			       queue_next((queue_t) wq_element);
1663
1664		/*
1665		 * We may have to recurse if this is a compound wait queue.
1666		 */
1667		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1668		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1669			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1670			wait_queue_set_t set_queue = wql->wql_setqueue;
1671
1672			/*
1673			 * We have to check the set wait queue. If the set
1674			 * supports pre-posting, it isn't already preposted,
1675			 * and we didn't find a thread in the set, then mark it.
1676			 *
1677			 * If we later find a thread, there may be a spurious
1678			 * pre-post here on this set.  The wait side has to check
1679			 * for that either pre- or post-wait.
1680			 */
1681			wqs_lock(set_queue);
1682			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
1683				t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
1684			}
1685			if (t != THREAD_NULL) {
1686				wqs_unlock(set_queue);
1687				return t;
1688			}
1689			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
1690				queue_t ppq = &set_queue->wqs_preposts;
1691				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
1692			}
1693			wqs_unlock(set_queue);
1694
1695		} else {
1696
1697			/*
			 * Otherwise, it's a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it (still locked).
1701			 */
1702			t = (thread_t)(void *)wq_element;
1703			if (t->wait_event == event) {
1704				if (fifo_thread == THREAD_NULL) {
1705					fifo_thread = t;
1706				}
1707#if IMPORTANCE_INHERITANCE
1708				/*
				 * Checking the imp donor bit does not need the thread
				 * or task lock, since we hold the wait queue lock and
				 * the thread cannot be removed from it without acquiring
				 * the wait queue lock.  The imp donor bit may change
				 * after we read its value, but it is OK to wake
				 * a thread while someone drops an importance assertion
				 * on that thread.
1716				 */
1717				thread_imp_donor = task_is_importance_donor(t->task);
1718#endif /* IMPORTANCE_INHERITANCE */
1719				realtime = (t->sched_pri >= BASEPRI_REALTIME);
1720				if (is_queue_fifo || thread_imp_donor || realtime ||
1721						(t->options & TH_OPT_VMPRIV)) {
1722					thread_lock(t);
1723					remqueue((queue_entry_t) t);
1724					t->wait_queue = WAIT_QUEUE_NULL;
1725					t->wait_event = NO_EVENT64;
1726					t->at_safe_point = FALSE;
1727					return t;	/* still locked */
1728				}
1729			}
1730			if (is_queue_global) {
1731				eventmask = eventmask | CAST_TO_EVENT_MASK(t->wait_event);
1732			}
1733			t = THREAD_NULL;
1734		}
1735		wq_element = wqe_next;
1736	}
1737
1738	if (is_queue_global) {
1739		wq->wq_eventmask = eventmask;
1740	}
1741#if IMPORTANCE_INHERITANCE
1742	if (fifo_thread != THREAD_NULL) {
1743		thread_lock(fifo_thread);
1744		remqueue((queue_entry_t) fifo_thread);
1745		fifo_thread->wait_queue = WAIT_QUEUE_NULL;
1746		fifo_thread->wait_event = NO_EVENT64;
1747		fifo_thread->at_safe_point = FALSE;
1748		return fifo_thread;	/* still locked */
1749	}
1750#endif /* IMPORTANCE_INHERITANCE */
1751	return THREAD_NULL;
1752}
1753
1754
1755/*
1756 *	Routine:	wait_queue_pull_thread_locked
1757 *	Purpose:
1758 *		Pull a thread off its wait queue and (possibly) unlock
1759 *		the waitq.
1760 * 	Conditions:
1761 *		at splsched
1762 *		wait queue locked
1763 *		thread locked
1764 * 	Returns:
1765 *		with the thread still locked.
1766 */
1767void
1768wait_queue_pull_thread_locked(
1769	wait_queue_t waitq,
1770	thread_t thread,
1771	boolean_t unlock)
1772{
1773
1774	assert(thread->wait_queue == waitq);
1775
1776	remqueue((queue_entry_t)thread );
1777	thread->wait_queue = WAIT_QUEUE_NULL;
1778	thread->wait_event = NO_EVENT64;
1779	thread->at_safe_point = FALSE;
1780	if (unlock)
1781		wait_queue_unlock(waitq);
1782}
1783
1784
1785/*
1786 *	Routine:	wait_queue_select64_thread
1787 *	Purpose:
1788 *		Look for a thread and remove it from the queues, if
1789 *		(and only if) the thread is waiting on the supplied
1790 *		<wait_queue, event> pair.
1791 * 	Conditions:
1792 *		at splsched
1793 *		wait queue locked
1794 *		possibly recursive
1795 * 	Returns:
1796 *		KERN_NOT_WAITING: Thread is not waiting here.
1797 *		KERN_SUCCESS: It was, and is now removed (returned locked)
1798 */
1799static kern_return_t
1800_wait_queue_select64_thread(
1801	wait_queue_t wq,
1802	event64_t event,
1803	thread_t thread)
1804{
1805	wait_queue_element_t wq_element;
1806	wait_queue_element_t wqe_next;
1807	kern_return_t res = KERN_NOT_WAITING;
1808	queue_t q = &wq->wq_queue;
1809
1810	thread_lock(thread);
1811	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
1812		remqueue((queue_entry_t) thread);
1813		thread->at_safe_point = FALSE;
1814		thread->wait_event = NO_EVENT64;
1815		thread->wait_queue = WAIT_QUEUE_NULL;
1816		/* thread still locked */
1817		return KERN_SUCCESS;
1818	}
1819	thread_unlock(thread);
1820
1821	/*
1822	 * The wait_queue associated with the thread may be one of this
1823	 * wait queue's sets.  Go see.  If so, removing it from
1824	 * there is like removing it from here.
1825	 */
1826	wq_element = (wait_queue_element_t) queue_first(q);
1827	while (!queue_end(q, (queue_entry_t)wq_element)) {
1828		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
1829		wqe_next = (wait_queue_element_t)
1830			       queue_next((queue_t) wq_element);
1831
1832		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
1833		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
1834			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
1835			wait_queue_set_t set_queue = wql->wql_setqueue;
1836
1837			wqs_lock(set_queue);
1838			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
1839				res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
1840								event,
1841								thread);
1842			}
1843			wqs_unlock(set_queue);
1844			if (res == KERN_SUCCESS)
1845				return KERN_SUCCESS;
1846		}
1847		wq_element = wqe_next;
1848	}
1849	return res;
1850}
1851
1852
1853/*
1854 *	Routine:	wait_queue_wakeup64_identity_locked
1855 *	Purpose:
 *		Select a single thread that is most eligible to run and
 *		set it running, but return the thread locked.
1858 *
1859 * 	Conditions:
1860 *		at splsched
1861 *		wait queue locked
1862 *		possibly recursive
1863 * 	Returns:
1864 *		a pointer to the locked thread that was awakened
1865 */
1866__private_extern__ thread_t
1867wait_queue_wakeup64_identity_locked(
1868	wait_queue_t wq,
1869	event64_t event,
1870	wait_result_t result,
1871	boolean_t unlock)
1872{
1873	kern_return_t res;
1874	thread_t thread;
1875
1876	assert(wait_queue_held(wq));
1877
1878	thread = _wait_queue_select64_one(wq, event);
1879	if (unlock)
1880		wait_queue_unlock(wq);
1881
1882	if (thread) {
1883		res = thread_go(thread, result);
1884		assert(res == KERN_SUCCESS);
1885	}
1886	return thread;  /* still locked if not NULL */
1887}
1888
1889
1890/*
1891 *	Routine:	wait_queue_wakeup64_one_locked
1892 *	Purpose:
 *		Select a single thread that is most eligible to run and
 *		set it running.
1895 *
1896 * 	Conditions:
1897 *		at splsched
1898 *		wait queue locked
1899 *		possibly recursive
1900 * 	Returns:
 *		KERN_SUCCESS - a thread was found waiting and awakened
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1903 */
1904__private_extern__ kern_return_t
1905wait_queue_wakeup64_one_locked(
1906	wait_queue_t wq,
1907	event64_t event,
1908	wait_result_t result,
1909	boolean_t unlock)
1910{
1911	thread_t thread;
1912
1913	assert(wait_queue_held(wq));
1914
1915	thread = _wait_queue_select64_one(wq, event);
1916	if (unlock)
1917		wait_queue_unlock(wq);
1918
1919	if (thread) {
1920		kern_return_t res;
1921
1922		res = thread_go(thread, result);
1923		assert(res == KERN_SUCCESS);
1924		thread_unlock(thread);
1925		return res;
1926	}
1927
1928	return KERN_NOT_WAITING;
1929}
1930
1931/*
1932 *	Routine:	wait_queue_wakeup_one
1933 *	Purpose:
1934 *		Wakeup the most appropriate thread that is in the specified
1935 *		wait queue for the specified event.
1936 *	Conditions:
1937 *		Nothing locked
1938 *	Returns:
1939 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1941 */
1942kern_return_t
1943wait_queue_wakeup_one(
1944	wait_queue_t wq,
1945	event_t event,
1946	wait_result_t result,
1947	int priority)
1948{
1949	thread_t thread;
1950	spl_t s;
1951
1952	if (!wait_queue_is_valid(wq)) {
1953		return KERN_INVALID_ARGUMENT;
1954	}
1955
1956	s = splsched();
1957	wait_queue_lock(wq);
1958	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
1959	wait_queue_unlock(wq);
1960
1961	if (thread) {
1962		kern_return_t res;
1963
1964		if (thread->sched_pri < priority) {
1965			if (priority <= MAXPRI) {
1966				set_sched_pri(thread, priority);
1967
1968				thread->was_promoted_on_wakeup = 1;
1969				thread->sched_flags |= TH_SFLAG_PROMOTED;
1970			}
1971		}
1972		res = thread_go(thread, result);
1973		assert(res == KERN_SUCCESS);
1974		thread_unlock(thread);
1975		splx(s);
1976		return res;
1977	}
1978
1979	splx(s);
1980	return KERN_NOT_WAITING;
1981}
1982
1983/*
1984 *	Routine:	wait_queue_wakeup64_one
1985 *	Purpose:
1986 *		Wakeup the most appropriate thread that is in the specified
1987 *		wait queue for the specified event.
1988 *	Conditions:
1989 *		Nothing locked
1990 *	Returns:
1991 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
1993 */
1994kern_return_t
1995wait_queue_wakeup64_one(
1996	wait_queue_t wq,
1997	event64_t event,
1998	wait_result_t result)
1999{
2000	thread_t thread;
2001	spl_t s;
2002
2003	if (!wait_queue_is_valid(wq)) {
2004		return KERN_INVALID_ARGUMENT;
2005	}
2006	s = splsched();
2007	wait_queue_lock(wq);
2008	thread = _wait_queue_select64_one(wq, event);
2009	wait_queue_unlock(wq);
2010
2011	if (thread) {
2012		kern_return_t res;
2013
2014		res = thread_go(thread, result);
2015		assert(res == KERN_SUCCESS);
2016		thread_unlock(thread);
2017		splx(s);
2018		return res;
2019	}
2020
2021	splx(s);
2022	return KERN_NOT_WAITING;
2023}
2024
2025
2026/*
2027 *	Routine:	wait_queue_wakeup64_thread_locked
2028 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
2031 *		and waiting on the specified event.
2032 *
2033 *		This is much safer than just removing the thread from
2034 *		whatever wait queue it happens to be on.  For instance, it
2035 *		may have already been awoken from the wait you intended to
2036 *		interrupt and waited on something else (like another
2037 *		semaphore).
2038 *	Conditions:
2039 *		at splsched
2040 *		wait queue already locked (may be released).
2041 *	Returns:
2042 *		KERN_SUCCESS - the thread was found waiting and awakened
2043 *		KERN_NOT_WAITING - the thread was not waiting here
2044 */
2045__private_extern__ kern_return_t
2046wait_queue_wakeup64_thread_locked(
2047	wait_queue_t wq,
2048	event64_t event,
2049	thread_t thread,
2050	wait_result_t result,
2051	boolean_t unlock)
2052{
2053	kern_return_t res;
2054
2055	assert(wait_queue_held(wq));
2056
2057	/*
2058	 * See if the thread was still waiting there.  If so, it got
2059	 * dequeued and returned locked.
2060	 */
2061	res = _wait_queue_select64_thread(wq, event, thread);
2062	if (unlock)
2063	    wait_queue_unlock(wq);
2064
2065	if (res != KERN_SUCCESS)
2066		return KERN_NOT_WAITING;
2067
2068	res = thread_go(thread, result);
2069	assert(res == KERN_SUCCESS);
2070	thread_unlock(thread);
2071	return res;
2072}
2073
2074/*
2075 *	Routine:	wait_queue_wakeup_thread
2076 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its set queues)
2079 *		and waiting on the specified event.
2080 *
2081 *		This is much safer than just removing the thread from
2082 *		whatever wait queue it happens to be on.  For instance, it
2083 *		may have already been awoken from the wait you intended to
2084 *		interrupt and waited on something else (like another
2085 *		semaphore).
2086 *	Conditions:
2087 *		nothing of interest locked
2088 *		we need to assume spl needs to be raised
2089 *	Returns:
2090 *		KERN_SUCCESS - the thread was found waiting and awakened
2091 *		KERN_NOT_WAITING - the thread was not waiting here
2092 */
2093kern_return_t
2094wait_queue_wakeup_thread(
2095	wait_queue_t wq,
2096	event_t event,
2097	thread_t thread,
2098	wait_result_t result)
2099{
2100	kern_return_t res;
2101	spl_t s;
2102
2103	if (!wait_queue_is_valid(wq)) {
2104		return KERN_INVALID_ARGUMENT;
2105	}
2106
2107	s = splsched();
2108	wait_queue_lock(wq);
2109	res = _wait_queue_select64_thread(wq, CAST_DOWN(event64_t,event), thread);
2110	wait_queue_unlock(wq);
2111
2112	if (res == KERN_SUCCESS) {
2113		res = thread_go(thread, result);
2114		assert(res == KERN_SUCCESS);
2115		thread_unlock(thread);
2116		splx(s);
2117		return res;
2118	}
2119	splx(s);
2120	return KERN_NOT_WAITING;
2121}
2122
2123/*
2124 *	Routine:	wait_queue_wakeup64_thread
2125 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its sets' queues)
2128 *		and waiting on the specified event.
2129 *
2130 *		This is much safer than just removing the thread from
2131 *		whatever wait queue it happens to be on.  For instance, it
2132 *		may have already been awoken from the wait you intended to
2133 *		interrupt and waited on something else (like another
2134 *		semaphore).
2135 *	Conditions:
2136 *		nothing of interest locked
2137 *		we need to assume spl needs to be raised
2138 *	Returns:
2139 *		KERN_SUCCESS - the thread was found waiting and awakened
2140 *		KERN_NOT_WAITING - the thread was not waiting here
2141 */
2142kern_return_t
2143wait_queue_wakeup64_thread(
2144	wait_queue_t wq,
2145	event64_t event,
2146	thread_t thread,
2147	wait_result_t result)
2148{
2149	kern_return_t res;
2150	spl_t s;
2151
2152	if (!wait_queue_is_valid(wq)) {
2153		return KERN_INVALID_ARGUMENT;
2154	}
2155
2156	s = splsched();
2157	wait_queue_lock(wq);
2158	res = _wait_queue_select64_thread(wq, event, thread);
2159	wait_queue_unlock(wq);
2160
2161	if (res == KERN_SUCCESS) {
2162		res = thread_go(thread, result);
2163		assert(res == KERN_SUCCESS);
2164		thread_unlock(thread);
2165		splx(s);
2166		return res;
2167	}
2168	splx(s);
2169	return KERN_NOT_WAITING;
2170}
2171