Lines Matching refs:lock

10 #include <lock.h>
46 static void _rw_lock_read_unlock_threads_locked(rw_lock* lock);
47 static void _rw_lock_write_unlock_threads_locked(rw_lock* lock);
49 static status_t _mutex_lock_threads_locked(mutex* lock);
50 static void _mutex_unlock_threads_locked(mutex* lock);
104 recursive_lock_get_recursion(recursive_lock *lock)
106 if (RECURSIVE_LOCK_HOLDER(lock) == find_thread(NULL))
107 return lock->recursion;
114 recursive_lock_init(recursive_lock *lock, const char *name)
116 mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
117 RECURSIVE_LOCK_HOLDER(lock) = -1;
118 lock->recursion = 0;
123 recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
125 mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
126 RECURSIVE_LOCK_HOLDER(lock) = -1;
127 lock->recursion = 0;
132 recursive_lock_destroy(recursive_lock *lock)
134 if (lock == NULL)
137 mutex_destroy(&lock->lock);
142 recursive_lock_lock(recursive_lock *lock)
146 if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
147 mutex_lock(&lock->lock);
149 lock->holder = thread;
153 lock->recursion++;
159 recursive_lock_trylock(recursive_lock *lock)
163 if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
164 status_t status = mutex_trylock(&lock->lock);
169 lock->holder = thread;
173 lock->recursion++;
179 recursive_lock_unlock(recursive_lock *lock)
181 if (find_thread(NULL) != RECURSIVE_LOCK_HOLDER(lock))
182 panic("recursive_lock %p unlocked by non-holder thread!\n", lock);
184 if (--lock->recursion == 0) {
186 lock->holder = -1;
188 mutex_unlock(&lock->lock);
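
The recursive-lock fragments above follow a simple pattern: the underlying mutex is taken only on a thread's first acquisition (line 147), the holder is recorded, and every acquisition bumps lock->recursion (lines 153, 173); the mutex is released only when the count returns to zero (lines 184-188). A minimal usage sketch, assuming only the <lock.h> API quoted in these results (the function bodies here are illustrative):

	#include <lock.h>

	static recursive_lock sCacheLock;

	void
	cache_init()
	{
		recursive_lock_init(&sCacheLock, "cache lock");
	}

	static void
	flush_entry()
	{
		// May be called with or without the lock held: re-acquiring on
		// the same thread only increments sCacheLock.recursion.
		recursive_lock_lock(&sCacheLock);
		// ... work on shared state ...
		recursive_lock_unlock(&sCacheLock);
	}

	void
	cache_flush_all()
	{
		recursive_lock_lock(&sCacheLock);
		flush_entry();
			// still held here; the underlying mutex is unlocked only
			// when recursion drops back to 0
		recursive_lock_unlock(&sCacheLock);
	}
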
197 rw_lock_wait(rw_lock* lock, bool writer)
205 if (lock->waiters != NULL)
206 lock->waiters->last->next = &waiter;
208 lock->waiters = &waiter;
210 lock->waiters->last = &waiter;
226 rw_lock_unblock(rw_lock* lock)
229 // has the write lock.
230 rw_lock_waiter* waiter = lock->waiters;
231 if (waiter == NULL || lock->holder > 0)
236 if (lock->active_readers > 0 || lock->pending_readers > 0)
240 lock->waiters = waiter->next;
241 if (lock->waiters != NULL)
242 lock->waiters->last = waiter->last;
244 lock->holder = get_thread_id(waiter->thread);
256 lock->waiters = waiter->next;
257 if (lock->waiters != NULL)
258 lock->waiters->last = waiter->last;
265 } while ((waiter = lock->waiters) != NULL && !waiter->writer);
267 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
268 lock->active_readers += readerCount;
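
The queueing code at lines 205-210 (repeated at 406-411 and 707-712) leans on one invariant: only the list head's last pointer is kept current, and it points at the tail, so appending a waiter is O(1) with no list walk. A sketch of that append, with a hypothetical rw_lock_waiter layout reconstructed from the fields these results actually use (the real type of the thread member is not visible here):

	struct rw_lock_waiter {
		rw_lock_waiter*	next;	// next waiter, FIFO order
		rw_lock_waiter*	last;	// meaningful only in the head: the tail
		void*			thread;	// placeholder type, not shown in this file
		bool			writer;
	};

	static void
	enqueue_waiter(rw_lock* lock, rw_lock_waiter* waiter)
	{
		waiter->next = NULL;
		if (lock->waiters != NULL)
			lock->waiters->last->next = waiter;	// chain after current tail
		else
			lock->waiters = waiter;				// first waiter becomes head
		lock->waiters->last = waiter;			// head's last tracks the tail
	}
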
275 rw_lock_init(rw_lock* lock, const char* name)
277 lock->name = name;
278 lock->waiters = NULL;
279 lock->holder = -1;
280 lock->count = 0;
281 lock->owner_count = 0;
282 lock->active_readers = 0;
283 lock->pending_readers = 0;
284 lock->flags = 0;
289 rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
291 lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
292 lock->waiters = NULL;
293 lock->holder = -1;
294 lock->count = 0;
295 lock->owner_count = 0;
296 lock->active_readers = 0;
297 lock->pending_readers = 0;
298 lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;
303 rw_lock_destroy(rw_lock* lock)
305 char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
306 ? (char*)lock->name : NULL;
312 if (lock->waiters != NULL && find_thread(NULL)
313 != lock->holder) {
315 "doesn't hold the write lock (%p)", lock);
318 if (rw_lock_write_lock(lock) != B_OK)
324 while (rw_lock_waiter* waiter = lock->waiters) {
326 lock->waiters = waiter->next;
332 lock->name = NULL;
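
rw_lock_init_etc() keeps only the RW_LOCK_FLAG_CLONE_NAME bit (line 298) and, when it is set, strdup()s the caller's name (line 291); rw_lock_destroy() later frees that clone (lines 305-306, 332). A short sketch of when the flag matters, assuming nothing beyond the calls quoted above:

	#include <stdio.h>
	#include <lock.h>

	void
	init_area_lock(rw_lock* lock, int32 areaID)
	{
		char name[32];
		snprintf(name, sizeof(name), "area %" B_PRId32, areaID);

		// The name lives on the stack, so it must be cloned; without
		// the flag the lock would keep a dangling pointer.
		rw_lock_init_etc(lock, name, RW_LOCK_FLAG_CLONE_NAME);
	}
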
343 _rw_lock_read_lock(rw_lock* lock)
348 if (lock->holder == find_thread(NULL)) {
349 lock->owner_count++;
353 // The writer that originally had the lock when we called atomic_add() might
357 if (lock->pending_readers > 0) {
358 lock->pending_readers--;
360 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
361 lock->active_readers++;
367 return rw_lock_wait(lock, false);
372 _rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
378 if (lock->holder == find_thread(NULL)) {
379 lock->owner_count++;
383 // The writer that originally had the lock when we called atomic_add() might
387 if (lock->pending_readers > 0) {
388 lock->pending_readers--;
390 if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
391 lock->active_readers++;
396 ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);
406 if (lock->waiters != NULL)
407 lock->waiters->last->next = &waiter;
409 lock->waiters = &waiter;
411 lock->waiters->last = &waiter;
426 // us after we already failed. In either case, we've got the lock, now.
430 // We failed to get the lock -- dequeue from waiter list.
432 rw_lock_waiter* other = lock->waiters;
440 lock->waiters = waiter.next;
441 if (lock->waiters != NULL)
442 lock->waiters->last = waiter.last;
446 if (lock->waiters->last == &waiter)
447 lock->waiters->last = previous;
452 // us (writers only manipulate the lock data with thread spinlock being
455 atomic_add(&lock->count, -1);
462 _rw_lock_read_unlock(rw_lock* lock)
465 _rw_lock_read_unlock_threads_locked(lock);
470 _rw_lock_read_unlock_threads_locked(rw_lock* lock)
472 // If we're still holding the write lock or if there are other readers,
474 if (lock->holder == find_thread(NULL)) {
475 lock->owner_count--;
479 if (--lock->active_readers > 0)
482 if (lock->active_readers < 0) {
483 panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
484 lock->active_readers = 0;
488 rw_lock_unblock(lock);
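
These read-lock functions are only the slow path: lock->count carries announced readers in its low bits, while every writer adds RW_LOCK_WRITER_COUNT_BASE (see lines 360, 396, 508). The matching fast path is presumably an inline in <lock.h>; it does not appear in these results, so treat the following as an assumption about its shape:

	static inline status_t
	rw_lock_read_lock(rw_lock* lock)
	{
		int32 oldCount = atomic_add(&lock->count, 1);
		if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
			return B_OK;					// no writer active or waiting
		return _rw_lock_read_lock(lock);	// slow path shown above
	}
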
495 rw_lock_write_lock(rw_lock* lock)
499 // If we're already the lock holder, we just need to increment the owner
502 if (lock->holder == thread) {
503 lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
508 int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);
511 // No-one else held a read or write lock, so it's ours now.
512 lock->holder = thread;
513 lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
520 lock->active_readers = oldCount - lock->pending_readers;
522 status_t status = rw_lock_wait(lock, true);
524 lock->holder = thread;
525 lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
533 _rw_lock_write_unlock(rw_lock* lock)
536 _rw_lock_write_unlock_threads_locked(lock);
541 _rw_lock_write_unlock_threads_locked(rw_lock* lock)
543 if (find_thread(NULL) != lock->holder) {
544 panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
545 lock);
549 lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
550 if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
553 // We gave up our last write lock -- clean up and unblock waiters.
554 int32 readerCount = lock->owner_count;
555 lock->holder = -1;
556 lock->owner_count = 0;
558 int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
564 lock->active_readers = readerCount;
565 rw_lock_unblock(lock);
575 lock->pending_readers = oldCount - readerCount
576 - rw_lock_unblock(lock);
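
The accounting at lines 549-576 is easiest to follow with concrete numbers. rw_lock_unblock() evidently returns how many readers it woke (line 576), and line 575 implies oldCount has already had the writer's own base contribution subtracted on a line this search did not match. A worked trace, assuming RW_LOCK_WRITER_COUNT_BASE is 0x10000 as in current Haiku headers:

	// writer takes the lock:      count = 0x10000
	// three readers announce:     count = 0x10003, all head for the slow path
	// one reader reaches rw_lock_wait() and queues; two have not yet
	// writer unlocks with no nested read locks (readerCount = 0):
	//   adjusted oldCount = 3; no writer is waiting (3 < BASE), so
	//   pending_readers = 3 - 0 - rw_lock_unblock()   // = 3 - 0 - 1 = 2
	// the queued reader wakes; the two stragglers later hit lines
	// 357-361, consume one pending_readers slot each, and proceed
	// without ever blocking
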
586 mutex_init(mutex* lock, const char *name)
588 lock->name = name;
589 lock->waiters = NULL;
591 lock->holder = -1;
593 lock->count = 0;
595 lock->flags = 0;
600 mutex_init_etc(mutex* lock, const char *name, uint32 flags)
602 lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
603 lock->waiters = NULL;
605 lock->holder = -1;
607 lock->count = 0;
609 lock->flags = flags & MUTEX_FLAG_CLONE_NAME;
614 mutex_destroy(mutex* lock)
616 char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
617 ? (char*)lock->name : NULL;
623 if (lock->waiters != NULL && find_thread(NULL)
624 != lock->holder) {
626 "hold the lock (%p)", lock);
627 if (_mutex_lock_threads_locked(lock) != B_OK)
632 while (mutex_waiter* waiter = lock->waiters) {
634 lock->waiters = waiter->next;
640 lock->name = NULL;
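
As with the rw_lock above, MUTEX_FLAG_CLONE_NAME is the only flag mutex_init_etc() retains (line 609), and mutex_destroy() frees the cloned name. Basic usage, a sketch built only from calls that appear in this file:

	#include <lock.h>

	static mutex sListLock;

	void
	list_init()
	{
		mutex_init(&sListLock, "list lock");
	}

	void
	list_add(void* item)
	{
		mutex_lock(&sListLock);
		// ... link item into the list ...
		mutex_unlock(&sListLock);
	}
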
681 _mutex_lock_threads_locked(mutex* lock)
687 if (lock->holder < 0) {
688 lock->holder = find_thread(NULL);
690 } else if (lock->holder == find_thread(NULL)) {
691 panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
692 lock->holder);
693 } else if (lock->holder == 0)
694 panic("_mutex_lock(): using uninitialized lock %p", lock);
696 if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
697 lock->flags &= ~MUTEX_FLAG_RELEASED;
707 if (lock->waiters != NULL) {
708 lock->waiters->last->next = &waiter;
710 lock->waiters = &waiter;
712 lock->waiters->last = &waiter;
726 lock->holder = get_thread_id(waiter.thread);
734 _mutex_lock(mutex* lock, void*)
737 return _mutex_lock_threads_locked(lock);
742 _mutex_unlock_threads_locked(mutex* lock)
745 if (find_thread(NULL) != lock->holder) {
748 find_thread(NULL), lock, lock->holder);
753 mutex_waiter* waiter = lock->waiters;
756 lock->waiters = waiter->next;
757 if (lock->waiters != NULL)
758 lock->waiters->last = waiter->last;
766 // cause a race condition, since another locker could think the lock
768 lock->holder = get_thread_id(waiter->thread);
772 // Just mark the lock as released.
774 lock->holder = -1;
776 lock->flags |= MUTEX_FLAG_RELEASED;
783 _mutex_unlock(mutex* lock)
786 _mutex_unlock_threads_locked(lock);
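
Note the two unlock paths at lines 753-776: with a waiter queued, ownership is handed straight to it (line 768) so the mutex never appears free in between, closing the race the truncated comment at line 766 alludes to; with no waiters, the holder is cleared and MUTEX_FLAG_RELEASED is set, which the next _mutex_lock_threads_locked() consumes at lines 696-697 instead of blocking. Condensed from the fragments above (thread wakeup elided):

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		lock->waiters = waiter->next;
		// Direct handoff: no other locker can snatch the mutex before
		// the woken waiter leaves _mutex_lock_threads_locked().
		lock->holder = get_thread_id(waiter->thread);
	} else {
		// No one is waiting: just mark the lock as released.
		lock->holder = -1;
		lock->flags |= MUTEX_FLAG_RELEASED;
	}
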
791 _mutex_trylock(mutex* lock)
796 if (lock->holder <= 0) {
797 lock->holder = find_thread(NULL);
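
_mutex_trylock() claims the mutex only when no thread holds it (lines 796-797); the listing ends before the failure path, so the B_WOULD_BLOCK return below is an assumption, not something shown in these results. A typical non-blocking pattern, reusing sListLock from the sketch above:

	status_t
	list_try_flush()
	{
		// Back off instead of blocking if another thread holds the lock.
		if (mutex_trylock(&sListLock) != B_OK)
			return B_WOULD_BLOCK;

		// ... flush the list ...

		mutex_unlock(&sListLock);
		return B_OK;
	}
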