/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_lock.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed lock synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>

#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <libkern/OSAtomic.h>
/*
 *	Ulock ownership MACROS
 *
 *	Assumes: ulock internal lock is held
 */

#define ulock_ownership_set(ul, th)				\
	MACRO_BEGIN						\
	thread_mtx_lock(th);					\
	enqueue (&th->held_ulocks, (queue_entry_t) (ul));	\
	thread_mtx_unlock(th);					\
	(ul)->holder = th;					\
	MACRO_END

#define ulock_ownership_clear(ul)				\
	MACRO_BEGIN						\
	thread_t th;						\
	th = (ul)->holder;					\
	if ((th)->active) {					\
		thread_mtx_lock(th);				\
		remqueue((queue_entry_t) (ul));			\
		thread_mtx_unlock(th);				\
	} else {						\
		remqueue((queue_entry_t) (ul));			\
	}							\
	(ul)->holder = THREAD_NULL;				\
	MACRO_END

/*
 *	Lock set ownership MACROS
 */

#define lock_set_ownership_set(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
	(t)->lock_sets_owned++;					\
	task_unlock((t));					\
	(ls)->owner = (t);					\
	MACRO_END

#define lock_set_ownership_clear(ls, t)				\
	MACRO_BEGIN						\
	task_lock((t));						\
	remqueue((queue_entry_t) (ls));				\
	(t)->lock_sets_owned--;					\
	task_unlock((t));					\
	MACRO_END

unsigned int lock_set_event;
#define LOCK_SET_EVENT CAST_EVENT64_T(&lock_set_event)

unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF CAST_EVENT64_T(&lock_set_handoff)


lck_attr_t		lock_set_attr;
lck_grp_t		lock_set_grp;
static lck_grp_attr_t	lock_set_grp_attr;



/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 */
void
lock_set_init(void)
{
	lck_grp_attr_setdefault(&lock_set_grp_attr);
	lck_grp_init(&lock_set_grp, "lock_set", &lock_set_grp_attr);
	lck_attr_setdefault(&lock_set_attr);
}


/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */
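/*
 *	For reference, a user-level client reaches this service through the
 *	MIG-generated routines.  A minimal usage sketch (assuming the
 *	declarations pulled in by <mach/mach.h>, including SYNC_POLICY_FIFO
 *	from <mach/sync_policy.h>; illustrative only, not part of this file):
 *
 *		lock_set_t	ls;
 *		kern_return_t	kr;
 *
 *		kr = lock_set_create(mach_task_self(), &ls, 4,
 *				     SYNC_POLICY_FIFO);
 *		if (kr == KERN_SUCCESS) {
 *			kr = lock_acquire(ls, 0);
 *			if (kr == KERN_SUCCESS || kr == KERN_LOCK_UNSTABLE)
 *				kr = lock_release(ls, 0);
 *			(void) lock_set_destroy(mach_task_self(), ls);
 *		}
 */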
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t	size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;


	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		kfree(lock_set, size);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */

	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set  = lock_set;
		ulock->holder	 = THREAD_NULL;
		ulock->blocked   = FALSE;
		ulock->unstable	 = FALSE;
		ulock->ho_wait	 = FALSE;
		ulock->accept_wait = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  This call will only succeed if the
 *	specified task is the same task that created the lock set
 *	(i.e. the lock set's owner).
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wakeup all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) ulock ownership is cleared.
	 *     The thread currently holding the ulock is revoked of its
	 *     ownership.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
					      LOCK_SET_HANDOFF,
					      THREAD_RESTART);
		}

		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
						      LOCK_SET_EVENT,
						      THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
						      LOCK_SET_HANDOFF,
						      THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}

		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Drop the lock set reference given to the containing task,
	 *  which in turn destroys the lock set structure if the reference
	 *  count goes to zero.
	 */
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}

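/*
 *	ROUTINE:	lock_acquire		[exported]
 *
 *	Acquires the ulock identified by lock_id within the lock set.
 *	If another thread holds the ulock, the caller blocks (ABORTSAFE)
 *	until ownership is transferred to it, the wait is interrupted,
 *	or the lock set is destroyed.  Returns KERN_LOCK_UNSTABLE (rather
 *	than KERN_SUCCESS) if the ulock was acquired but is marked unstable.
 */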
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  Block the current thread if the lock is already held.
	 */

	if (ulock->holder != THREAD_NULL) {
		int wait_result;

		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock->blocked = TRUE;
		wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
				       LOCK_SET_EVENT,
				       THREAD_ABORTSAFE, 0);
		ulock_unlock(ulock);

		/*
		 *  Block - Wait for lock to become available.
		 */
		if (wait_result == THREAD_WAITING)
			wait_result = thread_block(THREAD_CONTINUE_NULL);

		/*
		 *  Check to see why the thread was woken up.  In all cases,
		 *  we have already been removed from the wait queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* lock transitioned from old locker to us */
			/* they already made us owner */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
				                   KERN_SUCCESS;

		case THREAD_INTERRUPTED:
			return KERN_ABORTED;

		case THREAD_RESTART:
			goto retry;  /* probably a dead lock_set */

		default:
			panic("lock_acquire\n");
		}
	}

	/*
	 *  Assign lock ownership
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

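/*
 *	ROUTINE:	lock_release		[exported]
 *
 *	Releases the ulock identified by lock_id, which must currently
 *	be held by the calling thread.  The work is done by
 *	ulock_release_internal().
 */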
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];

	return (ulock_release_internal(ulock, current_thread()));
}

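/*
 *	ROUTINE:	lock_try		[exported]
 *
 *	Attempts to acquire the ulock without blocking.  Returns
 *	KERN_LOCK_OWNED_SELF if the caller already holds it, or
 *	KERN_LOCK_OWNED if another thread does.
 */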
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;


	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  An ownership status is returned to inform the caller as to
	 *  whether it already holds the lock or another thread does.
	 */

	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}

		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 *  Add the ulock to the current thread's held_ulocks list.
	 */

	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}

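/*
 *	ROUTINE:	lock_make_stable	[exported]
 *
 *	Marks the ulock, which must be held by the calling thread, as
 *	stable again after it has been marked unstable (see
 *	lock_make_unstable below).
 */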
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;


	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = FALSE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the lock as unstable.
 *
 *	NOTES:
 *	- All future acquisitions of the lock will return with a
 *	  KERN_LOCK_UNSTABLE status, until the lock is made stable again.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	lock_set = ulock->lock_set;
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	ulock->unstable = TRUE;
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock.
 *	If any threads are blocked waiting for the ulock, one is woken up
 *	and granted ownership.
 */
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
	lock_set_t	lock_set;

	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != thread) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If we have a hint that threads might be waiting,
	 *  try to transfer the lock ownership to a waiting thread
	 *  and wake it up.
	 */
	if (ulock->blocked) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	wqthread;
		spl_t		s;

		s = splsched();
		wait_queue_lock(wq);
		wqthread = wait_queue_wakeup64_identity_locked(wq,
							   LOCK_SET_EVENT,
							   THREAD_AWAKENED,
							   TRUE);
		/* wait_queue now unlocked, thread locked */

		if (wqthread != THREAD_NULL) {
			thread_unlock(wqthread);
			splx(s);

			/*
			 *  Transfer ulock ownership
			 *  from the current thread to the acquisition thread.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, wqthread);
			ulock_unlock(ulock);

			return KERN_SUCCESS;
		} else {
			ulock->blocked = FALSE;
			splx(s);
		}
	}

	/*
	 *  Disown ulock
	 */
	ulock_ownership_clear(ulock);
	ulock_unlock(ulock);

	return KERN_SUCCESS;
}

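/*
 *	ROUTINE:	lock_handoff		[exported]
 *
 *	Hands ownership of the ulock, which must be held by the calling
 *	thread, directly to a thread accepting it via lock_handoff_accept().
 *	If no accepting thread is waiting yet, the caller blocks until one
 *	arrives, the wait is interrupted, or the lock set is destroyed.
 */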
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);

	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	if (ulock->holder != current_thread()) {
		ulock_unlock(ulock);
		return KERN_INVALID_RIGHT;
	}

	/*
	 *  If the accepting thread (the receiver) is already waiting
	 *  to accept the lock from the handoff thread (the sender),
	 *  then perform the hand-off now.
	 */

	if (ulock->accept_wait) {
		wait_queue_t	wq = &ulock->wait_queue;
		thread_t	thread;
		spl_t		s;

		/*
		 *  See who the lucky devil is, if they are still there waiting.
		 */
		s = splsched();
		wait_queue_lock(wq);
		thread = wait_queue_wakeup64_identity_locked(
					   wq,
					   LOCK_SET_HANDOFF,
					   THREAD_AWAKENED,
					   TRUE);
		/* wait queue unlocked, thread locked */

		/*
		 *  Transfer lock ownership
		 */
		if (thread != THREAD_NULL) {
			/*
			 * The thread we are transferring to will try
			 * to take the ulock lock, and therefore will
			 * wait for us to complete the handoff even
			 * though we have already set it running.
			 */
			thread_unlock(thread);
			splx(s);

			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, thread);
			ulock->accept_wait = FALSE;
			ulock_unlock(ulock);
			return KERN_SUCCESS;
		} else {

			/*
			 * OOPS.  The accepting thread must have been
			 * aborted and is racing back to clear the flag
			 * that says it is waiting for an accept.  It will
			 * clear the flag when we release the ulock lock,
			 * so just fall through and wait for the next
			 * accepting thread (that's the way it is
			 * specified).
			 */
			splx(s);
		}
	}

	/*
	 * Indicate that there is a hand-off thread waiting, and then wait
	 * for an accepting thread.
	 */
	ulock->ho_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
			       LOCK_SET_HANDOFF,
			       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up via some action other than
	 *  lock_handoff_accept or lock_set_destroy (e.g. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 * We take the ulock lock to synchronize with the
		 * thread that is accepting ownership.
		 */
		ulock_lock(ulock);
		assert(ulock->holder != current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		assert(ulock->holder == current_thread());
		ulock->ho_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff");
	return KERN_FAILURE;
}

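/*
 *	ROUTINE:	lock_handoff_accept	[exported]
 *
 *	Accepts ownership of the ulock from a thread handing it off via
 *	lock_handoff().  If no handoff is pending, the caller blocks
 *	until the holder offers the lock, the wait is interrupted, or
 *	the lock set is destroyed.  Only one accepting thread may wait
 *	on a given ulock at a time.
 */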
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
	ulock_t   ulock;
	int	  wait_result;


	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

 retry:
	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 * If there is another accepting thread that beat us, just
	 * return with an error.
	 */
	if (ulock->accept_wait) {
		ulock_unlock(ulock);
		return KERN_ALREADY_WAITING;
	}

	if (ulock->holder == current_thread()) {
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED_SELF;
	}

	/*
	 *  If the handoff thread (the sender) is already waiting to
	 *  hand-off the lock to the accepting thread (the receiver),
	 *  then perform the hand-off now.
	 */
	if (ulock->ho_wait) {
		wait_queue_t	wq = &ulock->wait_queue;

		/*
		 *  See who the lucky devil is, if they are still there waiting.
		 */
		assert(ulock->holder != THREAD_NULL);

		if (wait_queue_wakeup64_thread(wq,
					    LOCK_SET_HANDOFF,
					    ulock->holder,
					    THREAD_AWAKENED) == KERN_SUCCESS) {
			/*
			 * Holder thread was still waiting to give it
			 * away.  Take over ownership.
			 */
			ulock_ownership_clear(ulock);
			ulock_ownership_set(ulock, current_thread());
			ulock->ho_wait = FALSE;
			ulock_unlock(ulock);
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		}

		/*
		 * OOPS.  The owner was aborted out of the handoff.
		 * It will clear its own flag when it gets back; in the
		 * meantime, we will wait as if we never saw the flag
		 * at all (by falling through).
		 */
	}

	ulock->accept_wait = TRUE;
	wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
			       LOCK_SET_HANDOFF,
			       THREAD_ABORTSAFE, 0);
	ulock_unlock(ulock);

	if (wait_result == THREAD_WAITING)
		wait_result = thread_block(THREAD_CONTINUE_NULL);

	/*
	 *  If the thread was woken up via some action other than
	 *  lock_handoff or lock_set_destroy (e.g. thread_terminate),
	 *  then we need to clear the ulock's handoff state.
	 */
	switch (wait_result) {

	case THREAD_AWAKENED:
		/*
		 * Take the lock to synchronize with the thread handing
		 * off the lock to us.  We don't want to continue until
		 * they complete the handoff.
		 */
		ulock_lock(ulock);
		assert(ulock->accept_wait == FALSE);
		assert(ulock->holder == current_thread());
		ulock_unlock(ulock);
		return KERN_SUCCESS;

	case THREAD_INTERRUPTED:
		ulock_lock(ulock);
		ulock->accept_wait = FALSE;
		ulock_unlock(ulock);
		return KERN_ABORTED;

	case THREAD_RESTART:
		goto retry;
	}

	panic("lock_handoff_accept");
	return KERN_FAILURE;
}

/*
 *	Routine:	lock_set_reference
 *
 *	Take out a reference on a lock set.  This keeps the data structure
 *	in existence (but the lock set may be deactivated).
 */
void
lock_set_reference(lock_set_t lock_set)
{
	OSIncrementAtomic(&((lock_set)->ref_count));
}

/*
 *	Routine:	lock_set_dereference
 *
 *	Release a reference on a lock set.  If this is the last reference,
 *	the lock set data structure is deallocated.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
	int	size;

	if (1 == OSDecrementAtomic(&((lock_set)->ref_count))) {
		ipc_port_dealloc_kernel(lock_set->port);
		size = (int)(sizeof(struct lock_set) +
			(sizeof(struct ulock) * (lock_set->n_ulocks - 1)));
		kfree(lock_set, size);
	}
}

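/*
 *	Routine:	ulock_release_all
 *
 *	Releases every ulock still held by the thread, marking each one
 *	unstable first (presumably cleanup for a thread that is going
 *	away while still holding ulocks).
 */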
void
ulock_release_all(
	thread_t		thread)
{
	ulock_t		ulock;

	while (!queue_empty(&thread->held_ulocks)) {
		ulock = (ulock_t)queue_first(&thread->held_ulocks);
		lock_make_unstable(ulock, thread);
		ulock_release_internal(ulock, thread);
	}
}