Lines Matching refs:ulock

59  *	Assumes: ulock internal lock is held 
146 ulock_t ulock;
155 if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
158 size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
183 * Initialize each ulock in the lock set
187 ulock = (ulock_t) &lock_set->ulock_list[x];
188 ulock_lock_init(ulock);
189 ulock->lock_set = lock_set;
190 ulock->holder = THREAD_NULL;
191 ulock->blocked = FALSE;
192 ulock->unstable = FALSE;
193 ulock->ho_wait = FALSE;
194 ulock->accept_wait = FALSE;
195 wait_queue_init(&ulock->wait_queue, policy);
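
A minimal user-space model of the per-ulock state initialized in lines 187-195 above; this is a reading aid, not the kernel code. All my_* names are hypothetical, a pthread mutex stands in for the ulock's internal lock, and a condition variable stands in for its wait queue. The later sketches in this listing reuse this model.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct my_lock_set;                  /* stand-in for struct lock_set */

    struct my_ulock {
        pthread_mutex_t     internal;    /* models ulock_lock()/ulock_unlock() */
        pthread_cond_t      wait_queue;  /* models wait_queue_init(..., policy) */
        struct my_lock_set *lock_set;    /* back pointer, line 189 */
        pthread_t           holder;      /* line 190; meaningful iff has_holder */
        bool                has_holder;  /* THREAD_NULL has no pthread analogue */
        bool                blocked;     /* line 191: acquirers are parked */
        bool                unstable;    /* line 192: a holder died holding it */
        bool                ho_wait;     /* line 193: holder parked to hand off */
        bool                accept_wait; /* line 194: a thread parked to accept */
        pthread_t           acceptor;    /* model-only: who is parked to accept */
    };

    static void my_ulock_init(struct my_ulock *u, struct my_lock_set *set)
    {
        pthread_mutex_init(&u->internal, NULL);
        pthread_cond_init(&u->wait_queue, NULL);
        u->lock_set   = set;
        u->has_holder = false;
        u->blocked = u->unstable = u->ho_wait = u->accept_wait = false;
    }
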
220 ulock_t ulock;
241 * If a ulock is currently held in the target lock set:
243 1) Wake up all threads blocked on the ulock (if any). Threads
247 * 2) ulock ownership is cleared.
248 The thread currently holding the ulock is stripped of its
252 ulock = &lock_set->ulock_list[i];
254 ulock_lock(ulock);
256 if (ulock->accept_wait) {
257 ulock->accept_wait = FALSE;
258 wait_queue_wakeup64_one(&ulock->wait_queue,
263 if (ulock->holder) {
264 if (ulock->blocked) {
265 ulock->blocked = FALSE;
266 wait_queue_wakeup64_all(&ulock->wait_queue,
270 if (ulock->ho_wait) {
271 ulock->ho_wait = FALSE;
272 wait_queue_wakeup64_one(&ulock->wait_queue,
276 ulock_ownership_clear(ulock);
279 ulock_unlock(ulock);
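
The destroy path above (lines 252-279) drains each ulock in a fixed order: parked acceptors first, then blocked acquirers, then a parked hander-off, and finally ownership is revoked. Here is a sketch in the model above; note the kernel delivers a distinct wait result to each class of woken thread via the wakeup calls at lines 258, 266, and 272, which a bare condition variable cannot express, so woken threads in this model must re-check state themselves.

    static void my_ulock_destroy_one(struct my_ulock *u)
    {
        pthread_mutex_lock(&u->internal);
        if (u->accept_wait) {        /* lines 256-258: kick a parked acceptor */
            u->accept_wait = false;
            pthread_cond_broadcast(&u->wait_queue);
        }
        if (u->has_holder) {
            if (u->blocked) {        /* lines 264-266: kick blocked acquirers */
                u->blocked = false;
                pthread_cond_broadcast(&u->wait_queue);
            }
            if (u->ho_wait) {        /* lines 270-272: kick a parked hander-off */
                u->ho_wait = false;
                pthread_cond_broadcast(&u->wait_queue);
            }
            u->has_holder = false;   /* line 276: revoke ownership */
        }
        pthread_mutex_unlock(&u->internal);
    }
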
298 ulock_t ulock;
313 ulock = (ulock_t) &lock_set->ulock_list[lock_id];
314 ulock_lock(ulock);
321 if (ulock->holder != THREAD_NULL) {
324 if (ulock->holder == current_thread()) {
325 ulock_unlock(ulock);
329 ulock->blocked = TRUE;
330 wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
333 ulock_unlock(ulock);
351 return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
368 ulock_ownership_set(ulock, current_thread());
369 ulock_unlock(ulock);
371 return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
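
A sketch of the acquire path (lines 313-371) in the same model, with two simplifications to flag: the kernel hands ownership directly to the dequeued waiter before the return at line 351, while this model lets woken waiters race to retake the lock, and the error values are errno stand-ins for KERN_LOCK_OWNED_SELF and KERN_LOCK_UNSTABLE.

    #include <errno.h>

    #define MY_SUCCESS       0
    #define MY_LOCK_UNSTABLE ESTALE      /* stand-in for KERN_LOCK_UNSTABLE */

    static int my_ulock_acquire(struct my_ulock *u)
    {
        pthread_mutex_lock(&u->internal);
        while (u->has_holder) {
            if (pthread_equal(u->holder, pthread_self())) {
                pthread_mutex_unlock(&u->internal);
                return EDEADLK;      /* lines 324-325: KERN_LOCK_OWNED_SELF */
            }
            u->blocked = true;       /* lines 329-333: mark, then park */
            pthread_cond_wait(&u->wait_queue, &u->internal);
        }
        u->holder = pthread_self();  /* line 368: take ownership */
        u->has_holder = true;
        /* line 371: the lock is held either way; unstable only warns */
        int r = u->unstable ? MY_LOCK_UNSTABLE : MY_SUCCESS;
        pthread_mutex_unlock(&u->internal);
        return r;
    }
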
377 ulock_t ulock;
385 ulock = (ulock_t) &lock_set->ulock_list[lock_id];
387 return (ulock_release_internal(ulock, current_thread()));
393 ulock_t ulock;
409 ulock = (ulock_t) &lock_set->ulock_list[lock_id];
410 ulock_lock(ulock);
420 if (ulock->holder != THREAD_NULL) {
423 if (ulock->holder == current_thread()) {
424 ulock_unlock(ulock);
428 ulock_unlock(ulock);
433 * Add the ulock to the lock set's held_ulocks list.
436 ulock_ownership_set(ulock, current_thread());
437 ulock_unlock(ulock);
439 return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
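
lock_try (lines 409-439) is the same protocol without the park: fail fast if another thread holds the ulock. A sketch, reusing the model and stand-in codes above:

    static int my_ulock_try(struct my_ulock *u)
    {
        pthread_mutex_lock(&u->internal);
        if (u->has_holder) {
            int r = pthread_equal(u->holder, pthread_self())
                        ? EDEADLK    /* lines 423-424: already the holder */
                        : EBUSY;     /* line 428: held elsewhere, fail fast */
            pthread_mutex_unlock(&u->internal);
            return r;
        }
        u->holder = pthread_self();  /* line 436: take ownership */
        u->has_holder = true;
        int r = u->unstable ? MY_LOCK_UNSTABLE : MY_SUCCESS;  /* line 439 */
        pthread_mutex_unlock(&u->internal);
        return r;
    }
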
445 ulock_t ulock;
461 ulock = (ulock_t) &lock_set->ulock_list[lock_id];
462 ulock_lock(ulock);
465 if (ulock->holder != current_thread()) {
466 ulock_unlock(ulock);
470 ulock->unstable = FALSE;
471 ulock_unlock(ulock);
486 lock_make_unstable (ulock_t ulock, thread_t thread)
490 lock_set = ulock->lock_set;
497 ulock_lock(ulock);
500 if (ulock->holder != thread) {
501 ulock_unlock(ulock);
505 ulock->unstable = TRUE;
506 ulock_unlock(ulock);
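
lock_make_stable (lines 461-471) lets only the current holder clear the unstable flag, while lock_make_unstable (lines 486-506) is its kernel-internal mirror, setting the flag on behalf of a thread being stripped of the lock. A sketch of the holder-only check:

    static int my_ulock_make_stable(struct my_ulock *u)
    {
        pthread_mutex_lock(&u->internal);
        if (!u->has_holder || !pthread_equal(u->holder, pthread_self())) {
            pthread_mutex_unlock(&u->internal);
            return EPERM;            /* lines 465-466: only the holder may */
        }
        u->unstable = false;         /* line 470 */
        pthread_mutex_unlock(&u->internal);
        return MY_SUCCESS;
    }
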
514 * Releases the ulock.
515 * If any threads are blocked waiting for the ulock, one is woken up.
519 ulock_release_internal (ulock_t ulock, thread_t thread)
523 if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
531 ulock_lock(ulock);
534 if (ulock->holder != thread) {
535 ulock_unlock(ulock);
544 if (ulock->blocked) {
545 wait_queue_t wq = &ulock->wait_queue;
562 * Transfer ulock ownership
565 ulock_ownership_clear(ulock);
566 ulock_ownership_set(ulock, wqthread);
567 ulock_unlock(ulock);
571 ulock->blocked = FALSE;
577 * Disown ulock
579 ulock_ownership_clear(ulock);
580 ulock_unlock(ulock);
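
A sketch of ulock_release_internal (lines 519-580). The kernel dequeues exactly one blocked thread and transfers ownership to it before unlocking (lines 565-567), so the ulock is never observably free across a contended release; a bare condition variable cannot hand off to one specific thread, so this model disowns the lock and broadcasts, letting waiters race.

    static int my_ulock_release(struct my_ulock *u)
    {
        pthread_mutex_lock(&u->internal);
        if (!u->has_holder || !pthread_equal(u->holder, pthread_self())) {
            pthread_mutex_unlock(&u->internal);
            return EPERM;            /* lines 534-535: only the holder may */
        }
        u->has_holder = false;       /* "Disown ulock", line 579 */
        if (u->blocked) {
            u->blocked = false;      /* waiters re-set this if they re-park */
            pthread_cond_broadcast(&u->wait_queue);
        }
        pthread_mutex_unlock(&u->internal);
        return MY_SUCCESS;
    }
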
588 ulock_t ulock;
606 ulock = (ulock_t) &lock_set->ulock_list[lock_id];
607 ulock_lock(ulock);
610 if (ulock->holder != current_thread()) {
611 ulock_unlock(ulock);
621 if (ulock->accept_wait) {
622 wait_queue_t wq = &ulock->wait_queue;
644 * to take the lock on the ulock, and therefore
651 ulock_ownership_clear(ulock);
652 ulock_ownership_set(ulock, thread);
653 ulock->accept_wait = FALSE;
654 ulock_unlock(ulock);
674 ulock->ho_wait = TRUE;
675 wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
678 ulock_unlock(ulock);
686 * then we need to clear the ulock's handoff state.
693 * we take the ulock lock to synchronize with the
696 ulock_lock(ulock);
697 assert(ulock->holder != current_thread());
698 ulock_unlock(ulock);
702 ulock_lock(ulock);
703 assert(ulock->holder == current_thread());
704 ulock->ho_wait = FALSE;
705 ulock_unlock(ulock);
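
A sketch of the handoff side (lines 606-705). If an acceptor is already parked, ownership transfers to it immediately (lines 651-654); otherwise the holder parks with ho_wait set. The interrupted-wait cleanup in the listing (re-taking the ulock lock to clear ho_wait, lines 696-705) is omitted here, and the model-only acceptor field records who is parked so the holder knows whom to make owner.

    static int my_ulock_handoff(struct my_ulock *u)
    {
        pthread_mutex_lock(&u->internal);
        if (!u->has_holder || !pthread_equal(u->holder, pthread_self())) {
            pthread_mutex_unlock(&u->internal);
            return EPERM;            /* lines 610-611: only the holder may */
        }
        if (u->accept_wait) {        /* an acceptor is already parked */
            u->holder = u->acceptor; /* lines 651-653: transfer and wake */
            u->accept_wait = false;
            pthread_cond_broadcast(&u->wait_queue);
            pthread_mutex_unlock(&u->internal);
            return MY_SUCCESS;
        }
        u->ho_wait = true;           /* lines 674-678: park until accepted */
        while (u->ho_wait)
            pthread_cond_wait(&u->wait_queue, &u->internal);
        pthread_mutex_unlock(&u->internal);
        return MY_SUCCESS;           /* acceptor took ownership, line 776 */
    }
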
719 ulock_t ulock;
736 ulock = (ulock_t) &lock_set->ulock_list[lock_id];
737 ulock_lock(ulock);
744 if (ulock->accept_wait) {
745 ulock_unlock(ulock);
749 if (ulock->holder == current_thread()) {
750 ulock_unlock(ulock);
759 if (ulock->ho_wait) {
760 wait_queue_t wq = &ulock->wait_queue;
765 assert(ulock->holder != THREAD_NULL);
769 ulock->holder,
775 ulock_ownership_clear(ulock);
776 ulock_ownership_set(ulock, current_thread());
777 ulock->ho_wait = FALSE;
778 ulock_unlock(ulock);
779 return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
791 ulock->accept_wait = TRUE;
792 wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
795 ulock_unlock(ulock);
803 * then we need to clear the ulock's handoff state.
813 ulock_lock(ulock);
814 assert(ulock->accept_wait == FALSE);
815 assert(ulock->holder == current_thread());
816 ulock_unlock(ulock);
820 ulock_lock(ulock);
821 ulock->accept_wait = FALSE;
822 ulock_unlock(ulock);
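
The accepting side (lines 736-822) mirrors it: only one acceptor may park at a time (lines 744-745), a thread cannot accept a handoff from itself (lines 749-750), and if the holder is already parked the acceptor takes ownership directly (lines 775-778). Again the interrupted-wait cleanup (lines 813-822) is left out of the sketch.

    static int my_ulock_handoff_accept(struct my_ulock *u)
    {
        pthread_mutex_lock(&u->internal);
        if (u->accept_wait) {        /* lines 744-745: one acceptor at a time */
            pthread_mutex_unlock(&u->internal);
            return EBUSY;
        }
        if (u->has_holder && pthread_equal(u->holder, pthread_self())) {
            pthread_mutex_unlock(&u->internal);
            return EDEADLK;          /* lines 749-750: cannot accept from self */
        }
        if (u->ho_wait) {            /* holder already parked: lines 759-779 */
            u->holder = pthread_self();
            u->ho_wait = false;
            pthread_cond_broadcast(&u->wait_queue);
            int r = u->unstable ? MY_LOCK_UNSTABLE : MY_SUCCESS;
            pthread_mutex_unlock(&u->internal);
            return r;
        }
        u->acceptor = pthread_self();    /* lines 791-795: park to accept */
        u->accept_wait = true;
        while (u->accept_wait)
            pthread_cond_wait(&u->wait_queue, &u->internal);
        /* the handing-off thread made us the holder before waking us */
        int r = u->unstable ? MY_LOCK_UNSTABLE : MY_SUCCESS;
        pthread_mutex_unlock(&u->internal);
        return r;
    }
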
859 (sizeof(struct ulock) * (lock_set->n_ulocks - 1)));
868 ulock_t ulock;
871 ulock = (ulock_t)queue_first(&thread->held_ulocks);
872 lock_make_unstable(ulock, thread);
873 ulock_release_internal(ulock, thread);
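
The final fragments are the thread-termination path: every ulock still on the dying thread's held_ulocks queue is first marked unstable, then released, so the next acquirer sees KERN_LOCK_UNSTABLE. A sketch, with a plain array standing in for the kernel queue and the dying thread assumed to run it itself:

    static void my_ulock_release_all(struct my_ulock **held, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            pthread_mutex_lock(&held[i]->internal);
            held[i]->unstable = true;    /* lock_make_unstable, line 505 */
            pthread_mutex_unlock(&held[i]->internal);
            my_ulock_release(held[i]);   /* ulock_release_internal, line 873 */
        }
    }
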