/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Mutex and recursive_lock code */


#include <lock.h>

#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <listeners.h>
#include <scheduling_analysis.h>
#include <thread.h>
#include <util/AutoLock.h>


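// Waiter entries live on the stack of the blocking thread. Each lock keeps a
// singly linked queue of them; as a small trick, the head's "last" pointer
// caches the tail of the queue, so new waiters can be appended in O(1)
// without a separate tail pointer in the lock structure itself.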
struct mutex_waiter {
	Thread*			thread;
	mutex_waiter*	next;		// next in queue
	mutex_waiter*	last;		// last in queue (valid for the first in queue)
};

struct rw_lock_waiter {
	Thread*			thread;
	rw_lock_waiter*	next;		// next in queue
	rw_lock_waiter*	last;		// last in queue (valid for the first in queue)
	bool			writer;
};

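// Set by _mutex_unlock() on non-KDEBUG builds when a mutex is released with
// no waiters queued; the next _mutex_lock() that sees the flag clears it and
// takes over the lock without blocking.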
#define MUTEX_FLAG_RELEASED		0x2


int32
recursive_lock_get_recursion(recursive_lock *lock)
{
	if (RECURSIVE_LOCK_HOLDER(lock) == thread_get_current_thread_id())
		return lock->recursion;

	return -1;
}


void
recursive_lock_init(recursive_lock *lock, const char *name)
{
	recursive_lock_init_etc(lock, name, 0);
}


void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
	mutex_init_etc(&lock->lock, name != NULL ? name : "recursive lock", flags);
#if !KDEBUG
	lock->holder = -1;
#endif
	lock->recursion = 0;
}


void
recursive_lock_destroy(recursive_lock *lock)
{
	if (lock == NULL)
		return;

	mutex_destroy(&lock->lock);
}


status_t
recursive_lock_lock(recursive_lock *lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_lock: called with interrupts disabled for lock "
			"%p (\"%s\")\n", lock, lock->lock.name);
	}
#endif

	thread_id thread = thread_get_current_thread_id();

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_trylock: called with interrupts disabled for "
			"lock %p (\"%s\")\n", lock, lock->lock.name);
	}
#endif

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		status_t status = mutex_trylock(&lock->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}


void
recursive_lock_unlock(recursive_lock *lock)
{
	if (thread_get_current_thread_id() != RECURSIVE_LOCK_HOLDER(lock))
		panic("recursive_lock %p unlocked by non-holder thread!\n", lock);

	if (--lock->recursion == 0) {
#if !KDEBUG
		lock->holder = -1;
#endif
		mutex_unlock(&lock->lock);
	}
}

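/*!	Switches the calling thread from \a from to \a to: drops one level of
	recursion on \a from and, once that releases it completely, hands the
	underlying mutex over to \a to via mutex_switch_lock() (or just bumps
	\a to's recursion count if the caller holds it already).
*/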
status_t
recursive_lock_switch_lock(recursive_lock* from, recursive_lock* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_switch_lock(): called with interrupts "
			"disabled for locks %p, %p", from, to);
	}
#endif

	if (--from->recursion > 0)
		return recursive_lock_lock(to);

#if !KDEBUG
	from->holder = -1;
#endif

	thread_id thread = thread_get_current_thread_id();

	if (thread == RECURSIVE_LOCK_HOLDER(to)) {
		to->recursion++;
		mutex_unlock(&from->lock);
		return B_OK;
	}

	status_t status = mutex_switch_lock(&from->lock, &to->lock);
	if (status != B_OK) {
		from->recursion++;
#if !KDEBUG
		from->holder = thread;
#endif
		return status;
	}

#if !KDEBUG
	to->holder = thread;
#endif
	to->recursion++;
	return B_OK;
}


status_t
recursive_lock_switch_from_mutex(mutex* from, recursive_lock* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_switch_from_mutex(): called with interrupts "
			"disabled for locks %p, %p", from, to);
	}
#endif

	thread_id thread = thread_get_current_thread_id();

	if (thread == RECURSIVE_LOCK_HOLDER(to)) {
		to->recursion++;
		mutex_unlock(from);
		return B_OK;
	}

	status_t status = mutex_switch_lock(from, &to->lock);
	if (status != B_OK)
		return status;

#if !KDEBUG
	to->holder = thread;
#endif
	to->recursion++;
	return B_OK;
}


status_t
recursive_lock_switch_from_read_lock(rw_lock* from, recursive_lock* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_switch_from_read_lock(): called with interrupts "
			"disabled for locks %p, %p", from, to);
	}
#endif

	thread_id thread = thread_get_current_thread_id();

	if (thread != RECURSIVE_LOCK_HOLDER(to)) {
		status_t status = mutex_switch_from_read_lock(from, &to->lock);
		if (status != B_OK)
			return status;

#if !KDEBUG
		to->holder = thread;
#endif
	} else {
		rw_lock_read_unlock(from);
	}

	to->recursion++;
	return B_OK;
}


static int
dump_recursive_lock_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	recursive_lock* lock = (recursive_lock*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("recursive_lock %p:\n", lock);
	kprintf("  mutex:           %p\n", &lock->lock);
	kprintf("  name:            %s\n", lock->lock.name);
	kprintf("  flags:           0x%x\n", lock->lock.flags);
#if KDEBUG
	kprintf("  holder:          %" B_PRId32 "\n", lock->lock.holder);
#else
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
#endif
	kprintf("  recursion:       %d\n", lock->recursion);

	kprintf("  waiting threads:");
	mutex_waiter* waiter = lock->lock.waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32, waiter->thread->id);
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


//	#pragma mark -


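/*!	Enqueues the calling thread as a reader or \a writer waiter of \a lock
	and blocks it. The caller must hold the lock's spinlock, passed in via
	\a locker; it is released while the thread is blocked and re-acquired
	before the result of thread_block() is returned.
*/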
static status_t
rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t result = thread_block();

	locker.Lock();
	return result;
}


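/*!	Called with the lock's spinlock held, after the lock has been released
	or given up by a reader. If no writer holds the lock, wakes either the
	writer at the head of the wait queue (returning RW_LOCK_WRITER_COUNT_BASE)
	or all readers queued before the next writer (returning how many were
	woken); returns 0 if nobody could be unblocked.
*/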
static int32
rw_lock_unblock(rw_lock* lock)
{
	// Check whether there are any waiting threads at all and whether anyone
	// has the write lock.
	rw_lock_waiter* waiter = lock->waiters;
	if (waiter == NULL || lock->holder >= 0)
		return 0;

	// writer at head of queue?
	if (waiter->writer) {
		if (lock->active_readers > 0 || lock->pending_readers > 0)
			return 0;

		// dequeue writer
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		lock->holder = waiter->thread->id;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
		return RW_LOCK_WRITER_COUNT_BASE;
	}

	// wake up one or more readers
	uint32 readerCount = 0;
	do {
		// dequeue reader
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		readerCount++;

		// unblock thread
		thread_unblock(waiter->thread, B_OK);

		waiter->thread = NULL;
	} while ((waiter = lock->waiters) != NULL && !waiter->writer);

	if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers += readerCount;

	return readerCount;
}


void
rw_lock_init(rw_lock* lock, const char* name)
{
	lock->name = name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = 0;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
	lock->name = (flags & RW_LOCK_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
	lock->holder = -1;
	lock->count = 0;
	lock->owner_count = 0;
	lock->active_readers = 0;
	lock->pending_readers = 0;
	lock->flags = flags & RW_LOCK_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitRWLock(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::RWLockInitialized, lock);
}


void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}


#if KDEBUG_RW_LOCK_DEBUG

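// When KDEBUG_RW_LOCK_DEBUG is enabled, each thread additionally records the
// rw_locks it currently has read-locked in its fixed-size held_read_locks
// array. Plain readers are anonymous otherwise (the lock only counts them),
// so this is what makes _rw_lock_is_read_locked() and the read-lock
// assertions possible.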
bool
_rw_lock_is_read_locked(rw_lock* lock)
{
	if (lock->holder == thread_get_current_thread_id())
		return true;

	Thread* thread = thread_get_current_thread();
	for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) {
		if (thread->held_read_locks[i] == lock)
			return true;
	}
	return false;
}


static void
_rw_lock_set_read_locked(rw_lock* lock)
{
	Thread* thread = thread_get_current_thread();
	for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) {
		if (thread->held_read_locks[i] != NULL)
			continue;

		thread->held_read_locks[i] = lock;
		return;
	}
	panic("too many read locks!");
}


static void
_rw_lock_unset_read_locked(rw_lock* lock)
{
	Thread* thread = thread_get_current_thread();
	for (size_t i = 0; i < B_COUNT_OF(Thread::held_read_locks); i++) {
		if (thread->held_read_locks[i] != lock)
			continue;

		thread->held_read_locks[i] = NULL;
		return;
	}
}

#endif


status_t
_rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_rw_lock_read_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif
#if KDEBUG_RW_LOCK_DEBUG
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE) {
		ASSERT_UNLOCKED_RW_LOCK(lock);
		_rw_lock_set_read_locked(lock);
		return B_OK;
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	ASSERT_UNLOCKED_RW_LOCK(lock);

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

#if KDEBUG_RW_LOCK_DEBUG
		_rw_lock_set_read_locked(lock);
#endif
		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait
	status_t status = rw_lock_wait(lock, false, locker);

#if KDEBUG_RW_LOCK_DEBUG
	if (status == B_OK)
		_rw_lock_set_read_locked(lock);
#endif

	return status;
}


status_t
_rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_rw_lock_read_lock_with_timeout(): called with interrupts "
			"disabled for lock %p", lock);
	}
#endif
#if KDEBUG_RW_LOCK_DEBUG
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE) {
		ASSERT_UNLOCKED_RW_LOCK(lock);
		_rw_lock_set_read_locked(lock);
		return B_OK;
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// We might be the writer ourselves.
	if (lock->holder == thread_get_current_thread_id()) {
		lock->owner_count++;
		return B_OK;
	}

	ASSERT_UNLOCKED_RW_LOCK(lock);

	// The writer that originally had the lock when we called atomic_add() might
	// already have gone and another writer could have overtaken us. In this
	// case the original writer set pending_readers, so we know that we don't
	// have to wait.
	if (lock->pending_readers > 0) {
		lock->pending_readers--;

		if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
			lock->active_readers++;

#if KDEBUG_RW_LOCK_DEBUG
		_rw_lock_set_read_locked(lock);
#endif
		return B_OK;
	}

	ASSERT(lock->count >= RW_LOCK_WRITER_COUNT_BASE);

	// we need to wait

	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = false;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);
	if (error == B_OK || waiter.thread == NULL) {
		// We were unblocked successfully -- or the timeout hit, but our
		// unblocker had already dequeued us. In either case we now hold the
		// lock.
#if KDEBUG_RW_LOCK_DEBUG
		_rw_lock_set_read_locked(lock);
#endif
		return B_OK;
	}

	locker.Lock();
	// We failed to get the lock -- dequeue from waiter list.
	rw_lock_waiter* previous = NULL;
	rw_lock_waiter* other = lock->waiters;
	while (other != &waiter) {
		previous = other;
		other = other->next;
	}

	if (previous == NULL) {
		// we are the first in line
		lock->waiters = waiter.next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter.last;
	} else {
		// one or more other waiters are before us in the queue
		previous->next = waiter.next;
		if (lock->waiters->last == &waiter)
			lock->waiters->last = previous;
	}

	// Decrement the count. At the moment this is all we have to do: there is
	// at least one writer ahead of us -- otherwise the last writer would have
	// unblocked us (writers only manipulate the lock data with the lock's
	// spinlock held) -- so our leaving doesn't make a difference to the ones
	// behind us in the queue.
	atomic_add(&lock->count, -1);

	return error;
}


void
_rw_lock_read_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	// If we're still holding the write lock or if there are other readers,
	// no-one can be woken up.
	if (lock->holder == thread_get_current_thread_id()) {
		ASSERT(lock->owner_count % RW_LOCK_WRITER_COUNT_BASE > 0);
		lock->owner_count--;
		return;
	}

#if KDEBUG_RW_LOCK_DEBUG
	_rw_lock_unset_read_locked(lock);

	int32 oldCount = atomic_add(&lock->count, -1);
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		return;
#endif

	if (--lock->active_readers > 0)
		return;

	if (lock->active_readers < 0) {
		panic("rw_lock_read_unlock(): lock %p not read-locked", lock);
		lock->active_readers = 0;
		return;
	}

	rw_lock_unblock(lock);
}


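/*!	Write-locks \a lock. The atomic \c count field encodes the lock state:
	every reader adds 1, every writer (active or waiting) adds
	RW_LOCK_WRITER_COUNT_BASE, so the value returned by the atomic_add()
	below tells us whether the lock was free, read-locked, or already
	claimed by another writer.
*/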
status_t
rw_lock_write_lock(rw_lock* lock)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("rw_lock_write_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// If we're already the lock holder, we just need to increment the owner
	// count.
	thread_id thread = thread_get_current_thread_id();
	if (lock->holder == thread) {
		lock->owner_count += RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	ASSERT_UNLOCKED_RW_LOCK(lock);

	// announce our claim
	int32 oldCount = atomic_add(&lock->count, RW_LOCK_WRITER_COUNT_BASE);

	if (oldCount == 0) {
		// No-one else held a read or write lock, so it's ours now.
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
		return B_OK;
	}

	// We have to wait. If we're the first writer, note the current reader
	// count.
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		lock->active_readers = oldCount - lock->pending_readers;

	status_t status = rw_lock_wait(lock, true, locker);
	if (status == B_OK) {
		lock->holder = thread;
		lock->owner_count = RW_LOCK_WRITER_COUNT_BASE;
	}

	return status;
}


void
_rw_lock_write_unlock(rw_lock* lock)
{
	InterruptsSpinLocker locker(lock->lock);

	if (thread_get_current_thread_id() != lock->holder) {
		panic("rw_lock_write_unlock(): lock %p not write-locked by this thread",
			lock);
		return;
	}

	ASSERT(lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE);

	lock->owner_count -= RW_LOCK_WRITER_COUNT_BASE;
	if (lock->owner_count >= RW_LOCK_WRITER_COUNT_BASE)
		return;

	// We gave up our last write lock -- clean up and unblock waiters.
	int32 readerCount = lock->owner_count;
	lock->holder = -1;
	lock->owner_count = 0;

#if KDEBUG_RW_LOCK_DEBUG
	if (readerCount != 0)
		_rw_lock_set_read_locked(lock);
#endif

	int32 oldCount = atomic_add(&lock->count, -RW_LOCK_WRITER_COUNT_BASE);
	oldCount -= RW_LOCK_WRITER_COUNT_BASE;

	if (oldCount != 0) {
		// If writers are waiting, take over our reader count.
		if (oldCount >= RW_LOCK_WRITER_COUNT_BASE) {
			lock->active_readers = readerCount;
			rw_lock_unblock(lock);
		} else {
			// No waiting writer, but there are one or more readers. We will
			// unblock all waiting readers -- that's the easy part -- and must
			// also make sure that all readers that haven't entered the
			// critical section yet won't start to wait. Otherwise a writer
			// overtaking such a reader would correctly start to wait, but the
			// reader, seeing the writer count > 0, would also start to wait.
			// We set pending_readers to the number of readers that are still
			// expected to enter the critical section.
			lock->pending_readers = oldCount - readerCount
				- rw_lock_unblock(lock);
		}
	}
}


static int
dump_rw_lock_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	rw_lock* lock = (rw_lock*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("rw lock %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
	kprintf("  count:           %#" B_PRIx32 "\n", lock->count);
	kprintf("  active readers   %d\n", lock->active_readers);
	kprintf("  pending readers  %d\n", lock->pending_readers);
	kprintf("  owner count:     %#" B_PRIx32 "\n", lock->owner_count);
	kprintf("  flags:           %#" B_PRIx32 "\n", lock->flags);

	kprintf("  waiting threads:");
	rw_lock_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32 "/%c", waiter->thread->id, waiter->writer ? 'w' : 'r');
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


void
mutex_init(mutex* lock, const char *name)
{
	mutex_init_etc(lock, name, 0);
}


void
mutex_init_etc(mutex* lock, const char *name, uint32 flags)
{
	lock->name = (flags & MUTEX_FLAG_CLONE_NAME) != 0 ? strdup(name) : name;
	lock->waiters = NULL;
	B_INITIALIZE_SPINLOCK(&lock->lock);
#if KDEBUG
	lock->holder = -1;
#else
	lock->count = 0;
#endif
	lock->flags = flags & MUTEX_FLAG_CLONE_NAME;

	T_SCHEDULING_ANALYSIS(InitMutex(lock, name));
	NotifyWaitObjectListeners(&WaitObjectListener::MutexInitialized, lock);
}


void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (lock->holder != -1 && thread_get_current_thread_id() != lock->holder) {
		panic("mutex_destroy(): the lock (%p) is held by %" B_PRId32 ", not "
			"by the caller", lock, lock->holder);
		if (_mutex_lock(lock, &locker) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		Thread* thread = waiter->thread;
		waiter->thread = NULL;
		thread_unblock(thread, B_ERROR);
	}

	lock->name = NULL;
	lock->flags = 0;
#if KDEBUG
	lock->holder = 0;
#else
	lock->count = INT16_MIN;
#endif

	locker.Unlock();

	free(name);
}


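// Like mutex_lock(), but expects the mutex's spinlock to be held already
// (via \a locker); used by the switch_lock variants below so that giving up
// the old lock and queueing for the new one form one atomic step.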
static inline status_t
mutex_lock_threads_locked(mutex* lock, InterruptsSpinLocker* locker)
{
#if KDEBUG
	return _mutex_lock(lock, locker);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, locker);
	return B_OK;
#endif
}


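/*!	Unlocks \a from and locks \a to. Since \a to's spinlock is held across
	both steps, \a to cannot, for instance, be destroyed between our release
	of \a from and our registration as its waiter or holder.
*/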
status_t
mutex_switch_lock(mutex* from, mutex* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("mutex_switch_lock(): called with interrupts disabled "
			"for locks %p, %p", from, to);
	}
#endif

	InterruptsSpinLocker locker(to->lock);

	mutex_unlock(from);

	return mutex_lock_threads_locked(to, &locker);
}


void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder)
		panic("mutex_transfer_lock(): current thread is not the lock holder!");
	lock->holder = thread;
#endif
}


status_t
mutex_switch_from_read_lock(rw_lock* from, mutex* to)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("mutex_switch_from_read_lock(): called with interrupts disabled "
			"for locks %p, %p", from, to);
	}
#endif

	InterruptsSpinLocker locker(to->lock);

	rw_lock_read_unlock(from);

	return mutex_lock_threads_locked(to, &locker);
}


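/*!	Slow path of mutex_lock(). On non-KDEBUG builds this is normally reached
	only after the caller's atomic decrement of \c count signalled contention
	(compare mutex_lock_threads_locked() above). \a _locker may point to an
	InterruptsSpinLocker that already holds the mutex's spinlock; otherwise
	the spinlock is acquired here. In either case it is released again before
	the thread blocks.
*/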
status_t
_mutex_lock(mutex* lock, void* _locker)
{
#if KDEBUG
	if (!gKernelStartup && _locker == NULL && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	// Acquire the mutex's spinlock, unless the caller already holds it and
	// passed in its locker via _locker.
	InterruptsSpinLocker* locker
		= reinterpret_cast<InterruptsSpinLocker*>(_locker);

	InterruptsSpinLocker lockLocker;
	if (locker == NULL) {
		lockLocker.SetTo(lock->lock, false);
		locker = &lockLocker;
	}

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using uninitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker->Unlock();

	status_t error = thread_block();
#if KDEBUG
	if (error == B_OK) {
		ASSERT(lock->holder == waiter.thread->id);
	}
#endif
	return error;
}


void
_mutex_unlock(mutex* lock)
{
	InterruptsSpinLocker locker(lock->lock);

#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder) {
		panic("_mutex_unlock() failure: thread %" B_PRId32 " is trying to "
			"release mutex %p (current holder %" B_PRId32 ")\n",
			thread_get_current_thread_id(), lock, lock->holder);
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides
		// reflecting the actual situation, this avoids a race condition:
		// with the holder set to -1, another locker could believe the lock
		// is not held by anyone.
		lock->holder = waiter->thread->id;
#endif

		// unblock thread
		thread_unblock(waiter->thread, B_OK);
	} else {
		// There are no waiters, so mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}


status_t
_mutex_trylock(mutex* lock)
{
#if KDEBUG
	InterruptsSpinLocker _(lock->lock);

	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == 0)
		panic("_mutex_trylock(): using uninitialized lock %p", lock);
	return B_WOULD_BLOCK;
#else
	return mutex_trylock(lock);
#endif
}


status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock_with_timeout(): called with interrupts disabled "
			"for lock %p", lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock_with_timeout(): double lock of %p by thread %"
			B_PRId32, lock, lock->holder);
	} else if (lock->holder == 0) {
		panic("_mutex_lock_with_timeout(): using uninitialized lock %p",
			lock);
	}
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		ASSERT(lock->holder == waiter.thread->id);
#endif
	} else {
		// If the lock was destroyed, our "thread" entry will be NULL.
		if (waiter.thread == NULL)
			return B_ERROR;

		// TODO: There is still a race condition during mutex destruction,
		// if we resume due to a timeout before our thread is set to NULL.

		locker.Lock();

		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}
		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			atomic_add(&lock->count, 1);
#endif
		} else {
			// the structure is not in the list -- even though the timeout
			// occurred, this means we own the lock now
#if KDEBUG
			ASSERT(lock->holder == waiter.thread->id);
#endif
			return B_OK;
		}
	}

	return error;
}


static int
dump_mutex_info(int argc, char** argv)
{
	if (argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	mutex* lock = (mutex*)parse_expression(argv[1]);

	if (!IS_KERNEL_ADDRESS(lock)) {
		kprintf("invalid address: %p\n", lock);
		return 0;
	}

	kprintf("mutex %p:\n", lock);
	kprintf("  name:            %s\n", lock->name);
	kprintf("  flags:           0x%x\n", lock->flags);
#if KDEBUG
	kprintf("  holder:          %" B_PRId32 "\n", lock->holder);
#else
	kprintf("  count:           %" B_PRId32 "\n", lock->count);
#endif

	kprintf("  waiting threads:");
	mutex_waiter* waiter = lock->waiters;
	while (waiter != NULL) {
		kprintf(" %" B_PRId32, waiter->thread->id);
		waiter = waiter->next;
	}
	kputs("\n");

	return 0;
}


// #pragma mark -


void
lock_debug_init()
{
	add_debugger_command_etc("mutex", &dump_mutex_info,
		"Dump info about a mutex",
		"<mutex>\n"
		"Prints info about the specified mutex.\n"
		"  <mutex>  - pointer to the mutex to print the info for.\n", 0);
	add_debugger_command_etc("rwlock", &dump_rw_lock_info,
		"Dump info about an rw lock",
		"<lock>\n"
		"Prints info about the specified rw lock.\n"
		"  <lock>  - pointer to the rw lock to print the info for.\n", 0);
	add_debugger_command_etc("recursivelock", &dump_recursive_lock_info,
		"Dump info about a recursive lock",
		"<lock>\n"
		"Prints info about the specified recursive lock.\n"
		"  <lock>  - pointer to the recursive lock to print the info for.\n",
		0);
}