Lines Matching refs:lock

14 #include <lock.h>
47 recursive_lock_get_recursion(recursive_lock *lock)
49 if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
50 return lock->recursion;
57 recursive_lock_init(recursive_lock *lock, const char *name)
59 recursive_lock_init_etc(lock, name, 0);
64 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
66 mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
68 lock->holder = -1;
70 lock->recursion = 0;
75 recursive_lock_destroy(recursive_lock *lock)
77 if (lock == NULL)
80 mutex_destroy(&lock->lock);
85 recursive_lock_lock(recursive_lock *lock)
89 panic("recursive_lock_lock: called with interrupts disabled for lock "
90 "%p (\"%s\")\n", lock, lock->lock.name);
96 if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
97 mutex_lock(&lock->lock);
99 lock->holder = thread;
103 lock->recursion++;
109 recursive_lock_trylock(recursive_lock *lock)
115 panic("recursive_lock_lock: called with interrupts disabled for lock "
116 "%p (\"%s\")\n", lock, lock->lock.name);
120 if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
121 status_t status = mutex_trylock(&lock->lock);
126 lock->holder = thread;
130 lock->recursion++;
136 recursive_lock_unlock(recursive_lock *lock)
138 if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
139 panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
141 if (--lock->recursion == 0) {
143 lock->holder = -1;
145 mutex_unlock(&lock->lock);
171 mutex_unlock(&from->lock);
175 status_t status = mutex_switch_lock(&from->lock, &to->lock);
210 status_t status = mutex_switch_lock(from, &to->lock);
235 status_t status = mutex_switch_from_read_lock(from, &to->lock);
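For orientation, a minimal usage sketch of the recursive_lock API matched above (assuming only the <lock.h> declarations shown; error handling elided):

	static recursive_lock sLock;

	recursive_lock_init(&sLock, "example lock");

	recursive_lock_lock(&sLock);
	recursive_lock_lock(&sLock);	// the holder may re-enter; recursion == 2
	recursive_lock_unlock(&sLock);
	recursive_lock_unlock(&sLock);	// holder cleared on the last unlock

	recursive_lock_destroy(&sLock);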
259 recursive_lock* lock = (recursive_lock*)parse_expression(argv[1]);
261 if (!IS_KERNEL_ADDRESS(lock)) {
262 kprintf("invalid address: %p\n", lock);
266 kprintf("recrusive_lock %p:\n", lock);
267 kprintf(" mutex: %p\n", &lock->lock);
268 kprintf(" name: %s\n", lock->lock.name);
269 kprintf(" flags: 0x%x\n", lock->lock.flags);
271 kprintf(" holder: %" B_PRId32 "\n", lock->lock.holder);
273 kprintf(" holder: %" B_PRId32 "\n", lock->holder);
275 kprintf(" recursion: %d\n", lock->recursion);
278 mutex_waiter* waiter = lock->lock.waiters;
293 rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
301 if (lock->waiters != NULL)
302 lock->waiters->last->next = &waiter;
304 lock->waiters = &waiter;
306 lock->waiters->last = &waiter;
309 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
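The waiter queue built here (and again in the timeout and mutex paths below) is a singly linked list in which the head's last field caches the tail, so enqueueing is O(1) without a tail pointer in the lock itself. A sketch of the invariant, using the field names visible above:

	// head->last always points at the tail; interior last fields are ignored
	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;	// append behind the cached tail
	else
		lock->waiters = &waiter;				// queue was empty
	lock->waiters->last = &waiter;				// head now caches the new tail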
320 rw_lock_unblock(rw_lock* lock)
323 // has the write lock.
324 rw_lock_waiter* waiter = lock->waiters;
325 if (waiter == NULL || lock->holder >= 0)
330 if (lock->active_readers > 0 || lock->pending_readers > 0)
334 lock->waiters = waiter->next;
335 if (lock->waiters != NULL)
336 lock->waiters->last = waiter->last;
338 lock->holder = waiter->thread->id;
351 lock->waiters = waiter->next;
352 if (lock->waiters != NULL)
353 lock->waiters->last = waiter->last;
361 } while ((waiter = lock->waiters) != NULL && !waiter->writer);
363 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
364 lock->active_readers += readerCount;
371 rw_lock_init(rw_lock* lock, const char* name)
373 lock->name = name;
374 lock->waiters = NULL;
375 B_INITIALIZE_SPINLOCK(&lock->lock);
376 lock->holder = -1;
377 lock->count = 0;
378 lock->owner_count = 0;
379 lock->active_readers = 0;
380 lock->pending_readers = 0;
381 lock->flags = 0;
383 T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
384 NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
389 rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
391 lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
392 lock->waiters = NULL;
393 B_INITIALIZE_SPINLOCK(&lock->lock);
394 lock->holder = -1;
395 lock->count = 0;
396 lock->owner_count = 0;
397 lock->active_readers = 0;
398 lock->pending_readers = 0;
399 lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
401 T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
402 NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
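A minimal usage sketch of the rw_lock initialized above, assuming the usual public wrappers (rw_lock_read_lock() and friends) from <lock.h>:

	static rw_lock sTableLock;

	rw_lock_init(&sTableLock, "table lock");

	rw_lock_read_lock(&sTableLock);		// shared: concurrent readers allowed
	// ... read the protected data ...
	rw_lock_read_unlock(&sTableLock);

	rw_lock_write_lock(&sTableLock);	// exclusive: readers and writers block
	// ... modify the protected data ...
	rw_lock_write_unlock(&sTableLock);

	rw_lock_destroy(&sTableLock);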
407 rw_lock_destroy(rw_lock* lock)
409 char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
410 ? (char*)lock->name : NULL;
413 InterruptsSpinLocker locker(lock->lock);
416 if (lock->waiters != NULL && thread_get_current_thread_id()
417 != lock->holder) {
419 "doesn't hold the write lock (%p)", lock);
422 if (rw_lock_write_lock(lock) != B_OK)
428 while (rw_lock_waiter* waiter = lock->waiters) {
430 lock->waiters = waiter->next;
436 lock->name = NULL;
447 _rw_lock_is_read_locked(rw_lock* lock)
449 if (lock->holder == thread_get_current_thread_id())
454 if (thread->held_read_locks[i] == lock)
462 _rw_lock_set_read_locked(rw_lock* lock)
469 thread->held_read_locks[i] = lock;
477 _rw_lock_unset_read_locked(rw_lock* lock)
481 if (thread->held_read_locks[i] != lock)
493 _rw_lock_read_lock(rw_lock* lock)
497 panic("_rw_lock_read_lock(): called with interrupts disabled for lock %p",
498 lock);
502 int32 oldCount = atomic_add(&lock->count, 1);
504 ASSERT_UNLOCKED_RW_LOCK(lock);
505 _rw_lock_set_read_locked(lock);
510 InterruptsSpinLocker locker(lock->lock);
513 if (lock->holder == thread_get_current_thread_id()) {
514 lock->owner_count++;
518 ASSERT_UNLOCKED_RW_LOCK(lock);
520 // The writer that originally had the lock when we called atomic_add() might
524 if (lock->pending_readers > 0) {
525 lock->pending_readers--;
527 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
528 lock->active_readers++;
531 _rw_lock_set_read_locked(lock);
536 ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
539 status_t status = rw_lock_wait(lock, false, locker);
543 _rw_lock_set_read_locked(lock);
551 _rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
557 "disabled for lock %p", lock);
561 int32 oldCount = atomic_add(&lock->count, 1);
563 ASSERT_UNLOCKED_RW_LOCK(lock);
564 _rw_lock_set_read_locked(lock);
569 InterruptsSpinLocker locker(lock->lock);
572 if (lock->holder == thread_get_current_thread_id()) {
573 lock->owner_count++;
577 ASSERT_UNLOCKED_RW_LOCK(lock);
579 // The writer that originally had the lock when we called atomic_add() might
583 if (lock->pending_readers > 0) {
584 lock->pending_readers--;
586 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
587 lock->active_readers++;
590 _rw_lock_set_read_locked(lock);
595 ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
605 if (lock->waiters != NULL)
606 lock->waiters->last->next = &waiter;
608 lock->waiters = &waiter;
610 lock->waiters->last = &waiter;
613 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
619 // us after we already failed. In either case, we've got the lock now.
621 _rw_lock_set_read_locked(lock);
627 // We failed to get the lock -- dequeue from waiter list.
629 rw_lock_waiter* other = lock->waiters;
637 lock->waiters = waiter.next;
638 if (lock->waiters != NULL)
639 lock->waiters->last = waiter.last;
643 if (lock->waiters->last == &waiter)
644 lock->waiters->last = previous;
649 // us (writers only manipulate the lock data with thread spinlock being
652 atomic_add(&lock->count, -1);
659 _rw_lock_read_unlock(rw_lock* lock)
661 InterruptsSpinLocker locker(lock->lock);
663 // If we're still holding the write lock or if there are other readers,
665 if (lock->holder == thread_get_current_thread_id()) {
666 ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
667 lock->owner_count--;
672 _rw_lock_unset_read_locked(lock);
674 int32 oldCount = atomic_add(&lock->count, -1);
679 if (--lock->active_readers > 0)
682 if (lock->active_readers < 0) {
683 panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
684 lock->active_readers = 0;
688 rw_lock_unblock(lock);
693 rw_lock_write_lock(rw_lock* lock)
697 panic("_rw_lock_write_lock(): called with interrupts disabled for lock %p",
698 lock);
702 InterruptsSpinLocker locker(lock->lock);
704 // If we're already the lock holder, we just need to increment the owner
707 if (lock->holder == thread) {
708 lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
712 ASSERT_UNLOCKED_RW_LOCK(lock);
715 int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);
718 // No-one else held a read or write lock, so it's ours now.
719 lock->holder = thread;
720 lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
727 lock->active_readers = oldCount - lock->pending_readers;
729 status_t status = rw_lock_wait(lock, true, locker);
731 lock->holder = thread;
732 lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
740 _rw_lock_write_unlock(rw_lock* lock)
742 InterruptsSpinLocker locker(lock->lock);
744 if (thread_get_current_thread_id() != lock->holder) {
745 panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
746 lock);
750 ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);
752 lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
753 if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
756 // We gave up our last write lock -- clean up and unblock waiters.
757 int32 readerCount = lock->owner_count;
758 lock->holder = -1;
759 lock->owner_count = 0;
763 _rw_lock_set_read_locked(lock);
766 int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
772 lock->active_readers = readerCount;
773 rw_lock_unblock(lock);
783 lock->pending_readers = oldCount - readerCount
784 - rw_lock_unblock(lock);
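The atomics above pack both populations into the single count field: each reader adds 1 and each writer adds RW_LOCK_WRITER_COUNT_BASE, so a count at or above that base means a writer is present, while the remainder is the reader total. A worked sketch, assuming the base is 0x10000 as in the private lock.h:

	// 3 readers, no writer:           count == 0x00003
	// 1 writer plus 2 queued readers: count == 0x10002
	//   count >= RW_LOCK_WRITER_COUNT_BASE      -> writer present
	//   count % RW_LOCK_WRITER_COUNT_BASE == 2  -> reader total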
798 rw_lock* lock = (rw_lock*)parse_expression(argv[1]);
800 if (!IS_KERNEL_ADDRESS(lock)) {
801 kprintf("invalid address: %p\n", lock);
805 kprintf("rw lock %p:\n", lock);
806 kprintf(" name: %s\n", lock->name);
807 kprintf(" holder: %" B_PRId32 "\n", lock->holder);
808 kprintf(" count: %#" B_PRIx32 "\n", lock->count);
809 kprintf(" active readers %d\n", lock->active_readers);
810 kprintf(" pending readers %d\n", lock->pending_readers);
811 kprintf(" owner count: %#" B_PRIx32 "\n", lock->owner_count);
812 kprintf(" flags: %#" B_PRIx32 "\n", lock->flags);
815 rw_lock_waiter* waiter = lock->waiters;
830 mutex_init(mutex* lock, const char *name)
832 mutex_init_etc(lock, name, 0);
837 mutex_init_etc(mutex* lock, const char *name, uint32 flags)
839 lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
840 lock->waiters = NULL;
841 B_INITIALIZE_SPINLOCK(&lock->lock);
843 lock->holder = -1;
845 lock->count = 0;
847 lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
849 T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
850 NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
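A minimal mutex usage sketch, assuming the public mutex_lock()/mutex_trylock()/mutex_unlock() wrappers from <lock.h>:

	static mutex sListLock;

	mutex_init(&sListLock, "list lock");

	mutex_lock(&sListLock);
	// ... touch the protected data ...
	mutex_unlock(&sListLock);

	if (mutex_trylock(&sListLock) == B_OK) {
		// acquired without blocking
		mutex_unlock(&sListLock);
	}

	mutex_destroy(&sListLock);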
855 mutex_destroy(mutex* lock)
857 char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
858 ? (char*)lock->name : NULL;
861 InterruptsSpinLocker locker(lock->lock);
864 if (lock->holder != -1 && thread_get_current_thread_id() != lock->holder) {
865 panic("mutex_destroy(): the lock (%p) is held by %" B_PRId32 ", not "
866 "by the caller", lock, lock->holder);
867 if (_mutex_lock(lock, &locker) != B_OK)
873 while (mutex_waiter* waiter = lock->waiters) {
875 lock->waiters = waiter->next;
883 lock->name = NULL;
884 lock->flags = 0;
886 lock->holder = 0;
888 lock->count = INT16_MIN;
898 mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
901 return _mutex_lock(lock, locker);
903 if (atomic_add(&lock->count, -1) < 0)
904 return _mutex_lock(lock, locker);
920 InterruptsSpinLocker locker(to->lock);
929 mutex_transfer_lock(mutex* lock, thread_id thread)
932 if (thread_get_current_thread_id() != lock->holder)
933 panic("mutex_transfer_lock(): current thread is not the lock holder!");
934 lock->holder = thread;
949 InterruptsSpinLocker locker(to->lock);
958 _mutex_lock(mutex* lock, void* _locker)
962 panic("_mutex_lock(): called with interrupts disabled for lock %p",
963 lock);
967 // acquire the spinlock only if !lockLocked
973 lockLocker.SetTo(lock->lock, false);
980 if (lock->holder < 0) {
981 lock->holder = thread_get_current_thread_id();
983 } else if (lock->holder == thread_get_current_thread_id()) {
984 panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
985 lock->holder);
986 } else if (lock->holder == 0)
987 panic("_mutex_lock(): using uninitialized lock %p", lock);
989 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
990 lock->flags &= ~MUTEX_FLAG_RELEASED;
1000 if (lock->waiters != NULL) {
1001 lock->waiters->last->next = &waiter;
1003 lock->waiters = &waiter;
1005 lock->waiters->last = &waiter;
1008 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
1014 ASSERT(lock->holder == waiter.thread->id);
1022 _mutex_unlock(mutex* lock)
1024 InterruptsSpinLocker locker(lock->lock);
1027 if (thread_get_current_thread_id() != lock->holder) {
1030 thread_get_current_thread_id(), lock, lock->holder);
1035 mutex_waiter* waiter = lock->waiters;
1038 lock->waiters = waiter->next;
1039 if (lock->waiters != NULL)
1040 lock->waiters->last = waiter->last;
1045 // cause a race condition, since another locker could think the lock
1047 lock->holder = waiter->thread->id;
1053 // There are no waiters, so mark the lock as released.
1055 lock->holder = -1;
1057 lock->flags |= MUTEX_FLAG_RELEASED;
1064 _mutex_trylock(mutex* lock)
1067 InterruptsSpinLocker _(lock->lock);
1069 if (lock->holder < 0) {
1070 lock->holder = thread_get_current_thread_id();
1072 } else if (lock->holder == 0)
1073 panic("_mutex_trylock(): using uninitialized lock %p", lock);
1076 return mutex_trylock(lock);
1082 _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
1086 panic("_mutex_lock(): called with interrupts disabled for lock %p",
1087 lock);
1091 InterruptsSpinLocker locker(lock->lock);
1096 if (lock->holder < 0) {
1097 lock->holder = thread_get_current_thread_id();
1099 } else if (lock->holder == thread_get_current_thread_id()) {
1100 panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
1101 lock->holder);
1102 } else if (lock->holder == 0)
1103 panic("_mutex_lock(): using uninitialized lock %p", lock);
1105 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
1106 lock->flags &= ~MUTEX_FLAG_RELEASED;
1116 if (lock->waiters != NULL) {
1117 lock->waiters->last->next = &waiter;
1119 lock->waiters = &waiter;
1121 lock->waiters->last = &waiter;
1124 thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
1131 ASSERT(lock->holder == waiter.thread->id);
1134 // If the lock was destroyed, our "thread" entry will be NULL.
1146 mutex_waiter* otherWaiter = lock->waiters;
1153 if (&waiter == lock->waiters) {
1156 lock->waiters = waiter.next;
1159 lock->waiters->last = previousWaiter;
1164 // we need to fix the lock count
1165 atomic_add(&lock->count, 1);
1169 // occurred, this means we own the lock now
1171 ASSERT(lock->holder == waiter.thread->id);
1189 mutex* lock = (mutex*)parse_expression(argv[1]);
1191 if (!IS_KERNEL_ADDRESS(lock)) {
1192 kprintf("invalid address: %p\n", lock);
1196 kprintf("mutex %p:\n", lock);
1197 kprintf(" name: %s\n", lock->name);
1198 kprintf(" flags: 0x%x\n", lock->flags);
1200 kprintf(" holder: %" B_PRId32 "\n", lock->holder);
1202 kprintf(" count: %" B_PRId32 "\n", lock->count);
1206 mutex_waiter* waiter = lock->waiters;
1229 "Dump info about an rw lock",
1230 "<lock>\n"
1231 "Prints info about the specified rw lock.\n"
1232 " <lock> - pointer to the rw lock to print the info for.\n", 0);
1234 "Dump info about a recursive lock",
1235 "<lock>\n"
1236 "Prints info about the specified recursive lock.\n"
1237 " <lock> - pointer to the recursive lock to print the info for.\n",