/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Mutex and recursive_lock code */


#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>

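// The waiter structures below live on the stacks of the waiting threads and
// form singly linked per-lock queues. The queue head's "last" pointer caches
// the tail, so a new waiter can be appended without walking the list.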
struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

#define MUTEX_FLAG_OWNS_NAME	MUTEX_FLAG_CLONE_NAME
#define MUTEX_FLAG_RELEASED		0x2

#define RW_LOCK_FLAG_OWNS_NAME	RW_LOCK_FLAG_CLONE_NAME


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	mutex_init(&lock->lock, name != NULL ? name : "recursive lock");
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
	RECURSIVE_LOCK_HOLDER(lock) = -1;
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


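/*!	Acquires the lock on behalf of the calling thread. If the thread already
	holds the lock, only the recursion count is incremented. Every successful
	call must be balanced by a recursive_lock_unlock().
*/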
status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_lock: called with interrupts disabled for lock "
			"%p (\"%s\")\n", lock, lock->lock.name);
	}

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled())
		panic("recursive_lock_trylock: called with interrupts disabled for "
			"lock %p (\"%s\")\n", lock, lock->lock.name);

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}


//	#pragma mark -


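/*!	Enqueues the calling thread in \a lock's waiter queue and blocks it
	until rw_lock_unblock() (or rw_lock_destroy()) wakes it up. The caller
	must hold the scheduler lock.
*/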
static status_t
rw_lock_wait(rw_lock* lock, bool writer)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	return thread_block_locked(waiter.thread);
}


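/*!	Wakes up waiters that may take over the lock, if any. A writer at the
	head of the queue is woken only once no readers are active or pending;
	in that case RW_LOCK_WRITER_COUNT_BASE is returned. Otherwise all
	readers at the head of the queue are woken and their number is returned.
	Returns 0 if no-one could be unblocked. The caller must hold the
	scheduler lock.
*/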
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder >= 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = waiter->thread->id;

		// unblock thread
		thread_unblock_locked(waiter->thread, B_OK);
		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		thread_unblock_locked(waiter->thread, B_OK);
		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


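// A minimal usage sketch, assuming the rw_lock_read_lock()/rw_lock_read_unlock()
// and rw_lock_write_lock()/rw_lock_write_unlock() wrappers declared in <lock.h>:
//
//	rw_lock sLock;
//	rw_lock_init(&sLock, "example lock");
//
//	rw_lock_read_lock(&sLock);
//	// ... read shared state ...
//	rw_lock_read_unlock(&sLock);
//
//	rw_lock_write_lock(&sLock);
//	// ... modify shared state ...
//	rw_lock_write_unlock(&sLock);
//
//	rw_lock_destroy(&sLock);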
void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(gSchedulerLock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock_locked(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


#if !KDEBUG_RW_LOCK_DEBUG

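/*!	Slow path of the read lock: the caller has already incremented
	lock->count (see the atomic_add() comment below) and found a writer
	registered. We may be that writer ourselves, or the writer may already
	have left and set pending_readers for us; otherwise we block in
	rw_lock_wait() until a writer hands the lock over.
*/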
status_t
_rw_lock_read_lock(rw_lock* lock)
{
	InterruptsSpinLocker locker(gSchedulerLock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait
	return rw_lock_wait(lock, false);
}


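/*!	Same as _rw_lock_read_lock(), but waits at most \a timeout. If the
	timeout expires before the lock is handed over, the waiter is removed
	from the queue again and the count increment of the fast path is undone.
*/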
status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
	InterruptsSpinLocker locker(gSchedulerLock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	status_t error = thread_block_with_timeout_locked(timeoutFlags, timeout);
	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- or our unblocker overtook us
		// after the timeout had already hit. In either case we own the lock
		// now.
		return B_OK;
	}

	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. ATM this is all we have to do. There's at least
	// one writer ahead of us -- otherwise the last writer would have unblocked
	// us (writers only manipulate the lock data with the scheduler lock being
	// held) -- so our leaving doesn't make a difference to the ones behind us
	// in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock, bool schedulerLocked)
{
	InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == thread_get_current_thread_id()) {
		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
		lock->owner_count--;
		return;
	}

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}

#endif	// !KDEBUG_RW_LOCK_DEBUG


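/*!	Acquires the lock for writing. lock->count encodes both lock types:
	each reader adds 1, each writer adds RW_LOCK_WRITER_COUNT_BASE. If the
	previous count was 0 the lock was free and is ours immediately;
	otherwise we wait until rw_lock_unblock() hands it to us. Write locks
	nest; owner_count tracks the nesting depth.
*/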
status_t
rw_lock_write_lock(rw_lock* lock)
{
	InterruptsSpinLocker locker(gSchedulerLock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = thread_get_current_thread_id();
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


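/*!	Releases one write lock level. Only when the holder gives up its last
	write level are the lock data cleaned up and waiters unblocked. Read
	locks the holder acquired while holding the write lock (counted in the
	low bits of owner_count) are carried over as active or pending readers.
*/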
void
_rw_lock_write_unlock(rw_lock* lock, bool schedulerLocked)
{
	InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);

	if (thread_get_current_thread_id() != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that readers that haven't entered the critical
			// section yet won't start to wait. Otherwise a writer overtaking
			// such a reader would correctly start to wait, but the reader,
			// seeing the writer count > 0, would start to wait as well, and
			// nothing would ever wake it up. So we set pending_readers to the
			// number of readers that are still expected to enter the critical
			// section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}


static int
dump_rw_lock_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("rw lock %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
	kprintf("  active readers:  %d\n", lock->active_readers);
	kprintf("  pending readers: %d\n", lock->pending_readers);
	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);

	kprintf("  waiting threads:");
	rw_lock_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32 "/%c", waiter->thread->id,
			waiter->writer ? 'w' : 'r');
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


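// A minimal usage sketch, assuming the mutex_lock()/mutex_unlock() wrappers
// declared in <lock.h> (without KDEBUG they manipulate lock->count atomically
// and only enter the slow paths below on contention):
//
//	mutex sLock;
//	mutex_init(&sLock, "example lock");
//
//	mutex_lock(&sLock);
//	// ... critical section ...
//	mutex_unlock(&sLock);
//
//	mutex_destroy(&sLock);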
void
mutex_init(mutex* lock, const char *name)
{
	lock->name = name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
	lock->ignore_unlock_count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(gSchedulerLock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
		!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock(lock, true) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock_locked(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


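/*!	Atomically (with respect to the scheduler lock) unlocks \a from and
	starts locking \a to, so no other thread can sneak in between the two
	operations. mutex_switch_from_read_lock() does the same with a
	read-locked rw_lock as the source.
*/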
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
	InterruptsSpinLocker locker(gSchedulerLock);

#if !KDEBUG
	if (atomic_add(&from->count, 1) < -1)
#endif
		_mutex_unlock(from, true);

	return mutex_lock_threads_locked(to);
}


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
	InterruptsSpinLocker locker(gSchedulerLock);

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_write_unlock(from, true);
#else
	int32 oldCount = atomic_add(&from->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(from, true);
#endif

	return mutex_lock_threads_locked(to);
}


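/*!	Out-of-line part of mutex_lock(). Under KDEBUG the uncontended case is
	handled right here (holder < 0); otherwise the fast path has already
	decremented lock->count and we only get here on contention. The calling
	thread is enqueued and blocked until _mutex_unlock() passes the lock on.
	\a schedulerLocked tells whether the caller already holds the scheduler
	lock.
*/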
status_t
_mutex_lock(mutex* lock, bool schedulerLocked)
{
#if KDEBUG
	if (!gKernelStartup && !schedulerLocked && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	// lock only if the caller doesn't already hold the scheduler lock
	InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	status_t error = thread_block_locked(waiter.thread);

#if KDEBUG
	if (error == B_OK)
		lock->holder = waiter.thread->id;
#endif

	return error;
}


void
_mutex_unlock(mutex* lock, bool schedulerLocked)
{
	// lock only if the caller doesn't already hold the scheduler lock
	InterruptsSpinLocker locker(gSchedulerLock, false, !schedulerLocked);

#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			thread_get_current_thread_id(), lock, lock->holder);
		return;
	}
#else
	if (lock->ignore_unlock_count > 0) {
		lock->ignore_unlock_count--;
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		thread_unblock_locked(waiter->thread, B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides
		// reflecting the actual situation, this avoids a race condition:
		// if we set it to -1 instead, another locker could believe the lock
		// is not held by anyone.
		lock->holder = waiter->thread->id;
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


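// Note: without KDEBUG the inline fast path in <lock.h> presumably attempts
// the lock atomically itself, which is why this out-of-line variant simply
// reports B_WOULD_BLOCK there.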
status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	InterruptsSpinLocker _(gSchedulerLock);

	if (lock->holder <= 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	}
#endif
	return B_WOULD_BLOCK;
}


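/*!	Like _mutex_lock(), but waits at most \a timeout (interpreted according
	to \a timeoutFlags). On timeout the waiter is dequeued again; without
	KDEBUG the lock count is repaired and, if the previous holder has
	already started unlocking on our behalf, ignore_unlock_count absorbs
	that extra unlock.
*/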
status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
			"for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(gSchedulerLock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock_with_timeout(): double lock of %p by thread %"
			B_PRId32, lock, lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock_with_timeout(): using uninitialized lock %p",
			lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	status_t error = thread_block_with_timeout_locked(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		lock->holder = waiter.thread->id;
#endif
	} else {
		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}
		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock and
				// the lock owner has already called atomic_add() in
				// mutex_unlock(). That is, we would probably get the lock
				// very soon (if the lock holder has a low priority, that
				// might actually take rather long, though), but the timeout
				// already occurred, so we don't try to wait. Just increment
				// the ignore unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}


static int
dump_mutex_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	mutex* lock = (mutex*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("mutex %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  flags:           0x%x\n", lock->flags);
#if KDEBUG
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
#else
	kprintf("  count:           %" B_PRId32 "\n", lock->count);
#endif

	kprintf("  waiting threads:");
	mutex_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32, waiter->thread->id);
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}




// #pragma mark -


void
lock_debug_init()
{
	add_debugger_command_etc("mutex", &dump_mutex_info,
		"Dump info about a mutex",
		"<mutex>\n"
		"Prints info about the specified mutex.\n"
		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
		"Dump info about an rw lock",
		"<lock>\n"
		"Prints info about the specified rw lock.\n"
		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
}
