1/*
2 * Copyright 2022, Haiku, Inc. All rights reserved.
3 * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
4 * Distributed under the terms of the MIT License.
5 */
6
7#include <pthread.h>
8
9#include <new>
10
11#include <Debug.h>
12
13#include <AutoLocker.h>
14#include <syscalls.h>
15#include <user_mutex_defs.h>
16#include <user_thread.h>
17#include <util/DoublyLinkedList.h>
18
19#include "pthread_private.h"
20
21#define MAX_READER_COUNT	1000000
22
23#define RWLOCK_FLAG_SHARED	0x01
24
25
26struct Waiter : DoublyLinkedListLinkImpl<Waiter> {
27	Waiter(bool writer)
28		:
29		userThread(get_user_thread()),
30		thread(find_thread(NULL)),
31		writer(writer),
32		queued(false)
33	{
34	}
35
36	user_thread*	userThread;
37	thread_id		thread;
38	status_t		status;
39	bool			writer;
40	bool			queued;
41};
42
43typedef DoublyLinkedList<Waiter> WaiterList;
44
45
// Process-shared rwlock flavor, built on a kernel semaphore that holds
// MAX_READER_COUNT "reader slots": a reader acquires one slot, a writer
// acquires all of them, which mutually excludes readers and writers.
struct SharedRWLock {
	uint32_t	flags;	// RWLOCK_FLAG_SHARED; same offset as in LocalRWLock
	int32_t		owner;	// thread currently holding the write lock, or -1
	int32_t		sem;	// id of the backing semaphore

	// Create the semaphore with all reader slots available.
	// Returns EAGAIN if semaphore creation failed.
	status_t Init()
	{
		flags = RWLOCK_FLAG_SHARED;
		owner = -1;
		sem = create_sem(MAX_READER_COUNT, "pthread rwlock");

		return sem >= 0 ? B_OK : EAGAIN;
	}

	status_t Destroy()
	{
		if (sem < 0)
			return B_BAD_VALUE;
		return delete_sem(sem) == B_OK ? B_OK : B_BAD_VALUE;
	}

	// Acquire a single reader slot; blocks while a writer owns all slots.
	// flags/timeout are passed straight to acquire_sem_etc().
	status_t ReadLock(uint32 flags, bigtime_t timeout)
	{
		return acquire_sem_etc(sem, 1, flags, timeout);
	}

	// Acquire every reader slot at once, excluding readers and other
	// writers; records the owner so Unlock() can tell the cases apart.
	status_t WriteLock(uint32 flags, bigtime_t timeout)
	{
		status_t error = acquire_sem_etc(sem, MAX_READER_COUNT,
			flags, timeout);
		if (error == B_OK)
			owner = find_thread(NULL);
		return error;
	}

	// Release all slots if called by the write-lock owner, one slot
	// otherwise (i.e. a reader unlocking).
	status_t Unlock()
	{
		if (find_thread(NULL) == owner) {
			owner = -1;
			return release_sem_etc(sem, MAX_READER_COUNT, 0);
		} else
			return release_sem(sem);
	}
};
90
91
// Process-private rwlock flavor. All fields are protected by a lightweight
// user mutex (the "structure lock"); blocked threads queue up in an
// in-process FIFO waiter list and are woken via kernel block/unblock calls.
struct LocalRWLock {
	uint32_t	flags;
	int32_t		owner;			// write-lock owner thread, or -1
	int32_t		mutex;			// user mutex word guarding this structure
	int32_t		unused;
	int32_t		reader_count;
	int32_t		writer_count;
		// Note, that reader_count and writer_count are not used the same way.
		// writer_count includes the write lock owner as well as waiting
		// writers. reader_count includes read lock owners only.
	WaiterList	waiters;		// FIFO queue of blocked readers and writers

	status_t Init()
	{
		flags = 0;
		owner = -1;
		mutex = 0;
		reader_count = 0;
		writer_count = 0;
		// Placement-construct the list: the caller's pthread_rwlock_t
		// storage is raw memory, not a constructed C++ object.
		new(&waiters) WaiterList;

		return B_OK;
	}

	// Refuses to destroy a lock that is still held or being waited on.
	status_t Destroy()
	{
		Locker locker(this);
		if (reader_count > 0 || waiters.Head() != NULL || writer_count > 0)
			return EBUSY;
		return B_OK;
	}

	// Enter the critical region protecting the structure's fields.
	// Returns false only if the kernel mutex lock failed.
	bool StructureLock()
	{
		// Fast path: grab the uncontended mutex entirely in userland.
		const int32 oldValue = atomic_test_and_set((int32*)&mutex, B_USER_MUTEX_LOCKED, 0);
		if (oldValue != 0) {
			status_t status;
			do {
				status = _kern_mutex_lock((int32*)&mutex, NULL, 0, 0);
			} while (status == B_INTERRUPTED);

			if (status != B_OK)
				return false;
		}
		return true;
	}

	void StructureUnlock()
	{
		// Exit critical region: unlock the mutex
		int32 status = atomic_and((int32*)&mutex,
			~(int32)B_USER_MUTEX_LOCKED);
		if ((status & B_USER_MUTEX_WAITING) != 0)
			_kern_mutex_unblock((int32*)&mutex, 0);
	}

	status_t ReadLock(uint32 flags, bigtime_t timeout)
	{
		Locker locker(this);

		// No writer holds or waits for the lock -- take it immediately.
		// (Waiting writers block new readers, preventing writer starvation.)
		if (writer_count == 0) {
			reader_count++;
			return B_OK;
		}

		return _Wait(false, flags, timeout);
	}

	status_t WriteLock(uint32 flags, bigtime_t timeout)
	{
		Locker locker(this);

		// Uncontended: become the write owner without blocking.
		if (reader_count == 0 && writer_count == 0) {
			writer_count++;
			owner = find_thread(NULL);
			return B_OK;
		}

		return _Wait(true, flags, timeout);
	}

	status_t Unlock()
	{
		Locker locker(this);

		// The write owner releases the write lock; anyone else must be a
		// reader releasing a read lock.
		if (find_thread(NULL) == owner) {
			writer_count--;
			owner = -1;
		} else
			reader_count--;

		_Unblock();

		return B_OK;
	}

private:
	// Queue the calling thread and block until it is woken by _Unblock()
	// or the timeout/interrupt hits. Called with the structure lock held;
	// drops it around the kernel block call and reacquires it afterwards.
	status_t _Wait(bool writer, uint32 flags, bigtime_t timeout)
	{
		if (timeout == 0)
			return B_TIMED_OUT;

		// Caller already owns the write lock: waiting would deadlock.
		if (writer_count == 1 && owner == find_thread(NULL))
			return EDEADLK;

		Waiter waiter(writer);
		waiters.Add(&waiter);
		waiter.queued = true;
		// wait_status > 0 tells _Unblock() this thread still needs a
		// kernel unblock call.
		waiter.userThread->wait_status = 1;

		if (writer)
			writer_count++;

		StructureUnlock();
		status_t error = _kern_block_thread(flags, timeout);
		StructureLock();

		// Dequeued by _Unblock(): the lock was handed to us; its status
		// is authoritative, not the kernel's return value.
		if (!waiter.queued)
			return waiter.status;

		// we're still queued, which means an error (timeout, interrupt)
		// occurred
		waiters.Remove(&waiter);

		if (writer)
			writer_count--;

		_Unblock();

		return error;
	}

	// Wake the next eligible waiter(s): either one writer (when no readers
	// remain) or every reader at the head of the queue. Called with the
	// structure lock held.
	void _Unblock()
	{
		// Check whether there any waiting threads at all and whether anyone
		// has the write lock
		Waiter* waiter = waiters.Head();
		if (waiter == NULL || owner >= 0)
			return;

		// writer at head of queue?
		if (waiter->writer) {
			if (reader_count == 0) {
				waiter->status = B_OK;
				waiter->queued = false;
				waiters.Remove(waiter);
				// Hand over ownership before the thread even runs.
				owner = waiter->thread;

				if (waiter->userThread->wait_status > 0)
					_kern_unblock_thread(waiter->thread, B_OK);
			}
			return;
		}

		// wake up one or more readers -- we unblock more than one reader at
		// a time to save trips to the kernel
		while (!waiters.IsEmpty() && !waiters.Head()->writer) {
			static const int kMaxReaderUnblockCount = 128;
			thread_id readers[kMaxReaderUnblockCount];
			int readerCount = 0;

			while (readerCount < kMaxReaderUnblockCount
					&& (waiter = waiters.Head()) != NULL
					&& !waiter->writer) {
				waiter->status = B_OK;
				waiter->queued = false;
				waiters.Remove(waiter);

				if (waiter->userThread->wait_status > 0) {
					readers[readerCount++] = waiter->thread;
					reader_count++;
				}
			}

			if (readerCount > 0)
				_kern_unblock_threads(readers, readerCount, B_OK);
		}
	}


	// Adapter so AutoLocker can drive StructureLock()/StructureUnlock().
	struct Locking {
		inline bool Lock(LocalRWLock* lockable)
		{
			return lockable->StructureLock();
		}

		inline void Unlock(LocalRWLock* lockable)
		{
			lockable->StructureUnlock();
		}
	};
	typedef AutoLocker<LocalRWLock, Locking> Locker;
};
285
286
// Compile-time checks that both lock representations fit inside the public
// pthread_rwlock_t storage they are overlaid on. Never called at runtime.
static void inline
assert_dummy()
{
	STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(SharedRWLock));
	STATIC_ASSERT(sizeof(pthread_rwlock_t) >= sizeof(LocalRWLock));
}
293
294
295// #pragma mark - public lock functions
296
297
298int
299pthread_rwlock_init(pthread_rwlock_t* lock, const pthread_rwlockattr_t* _attr)
300{
301	pthread_rwlockattr* attr = _attr != NULL ? *_attr : NULL;
302	bool shared = attr != NULL && (attr->flags & RWLOCK_FLAG_SHARED) != 0;
303
304	if (shared)
305		return ((SharedRWLock*)lock)->Init();
306	else
307		return ((LocalRWLock*)lock)->Init();
308}
309
310
311int
312pthread_rwlock_destroy(pthread_rwlock_t* lock)
313{
314	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
315		return ((SharedRWLock*)lock)->Destroy();
316	else
317		return ((LocalRWLock*)lock)->Destroy();
318}
319
320
321int
322pthread_rwlock_rdlock(pthread_rwlock_t* lock)
323{
324	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
325		return ((SharedRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
326	else
327		return ((LocalRWLock*)lock)->ReadLock(0, B_INFINITE_TIMEOUT);
328}
329
330
331int
332pthread_rwlock_tryrdlock(pthread_rwlock_t* lock)
333{
334	status_t error;
335	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
336		error = ((SharedRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
337	else
338		error = ((LocalRWLock*)lock)->ReadLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
339
340	return error == B_TIMED_OUT ? EBUSY : error;
341}
342
343
344int
345pthread_rwlock_clockrdlock(pthread_rwlock_t* lock, clockid_t clock_id,
346            const struct timespec *abstime)
347{
348	bigtime_t timeout = abstime->tv_sec * 1000000LL
349		+ abstime->tv_nsec / 1000LL;
350	uint32 flags = 0;
351	if (timeout >= 0) {
352		switch (clock_id) {
353			case CLOCK_REALTIME:
354				flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
355				break;
356			case CLOCK_MONOTONIC:
357				flags = B_ABSOLUTE_TIMEOUT;
358				break;
359			default:
360				return EINVAL;
361		}
362	}
363
364	status_t error;
365	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
366		error = ((SharedRWLock*)lock)->ReadLock(flags, timeout);
367	else
368		error = ((LocalRWLock*)lock)->ReadLock(flags, timeout);
369
370	return error == B_TIMED_OUT ? EBUSY : error;
371}
372
373
// POSIX timed read lock: identical to the clock variant with the deadline
// measured against CLOCK_REALTIME.
int
pthread_rwlock_timedrdlock(pthread_rwlock_t* lock,
	const struct timespec *abstime)
{
	return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, abstime);
}
380
381
382int
383pthread_rwlock_wrlock(pthread_rwlock_t* lock)
384{
385	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
386		return ((SharedRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
387	else
388		return ((LocalRWLock*)lock)->WriteLock(0, B_INFINITE_TIMEOUT);
389}
390
391
392int
393pthread_rwlock_trywrlock(pthread_rwlock_t* lock)
394{
395	status_t error;
396	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
397		error = ((SharedRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
398	else
399		error = ((LocalRWLock*)lock)->WriteLock(B_ABSOLUTE_REAL_TIME_TIMEOUT, 0);
400
401	return error == B_TIMED_OUT ? EBUSY : error;
402}
403
404
405int
406pthread_rwlock_clockwrlock (pthread_rwlock_t* lock, clockid_t clock_id,
407	const struct timespec *abstime)
408{
409	bigtime_t timeout = abstime->tv_sec * 1000000LL
410		+ abstime->tv_nsec / 1000LL;
411	uint32 flags = 0;
412	if (timeout >= 0) {
413		switch (clock_id) {
414			case CLOCK_REALTIME:
415				flags = B_ABSOLUTE_REAL_TIME_TIMEOUT;
416				break;
417			case CLOCK_MONOTONIC:
418				flags = B_ABSOLUTE_TIMEOUT;
419				break;
420			default:
421				return EINVAL;
422		}
423	}
424
425	status_t error;
426	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
427		error = ((SharedRWLock*)lock)->WriteLock(flags, timeout);
428	else
429		error = ((LocalRWLock*)lock)->WriteLock(flags, timeout);
430
431	return error == B_TIMED_OUT ? EBUSY : error;
432}
433
434
// POSIX timed write lock: identical to the clock variant with the deadline
// measured against CLOCK_REALTIME.
int
pthread_rwlock_timedwrlock(pthread_rwlock_t* lock,
	const struct timespec *abstime)
{
	return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, abstime);
}
441
442
443int
444pthread_rwlock_unlock(pthread_rwlock_t* lock)
445{
446	if ((lock->flags & RWLOCK_FLAG_SHARED) != 0)
447		return ((SharedRWLock*)lock)->Unlock();
448	else
449		return ((LocalRWLock*)lock)->Unlock();
450}
451
452
453// #pragma mark - public attribute functions
454
455
456int
457pthread_rwlockattr_init(pthread_rwlockattr_t* _attr)
458{
459	pthread_rwlockattr* attr = (pthread_rwlockattr*)malloc(
460		sizeof(pthread_rwlockattr));
461	if (attr == NULL)
462		return B_NO_MEMORY;
463
464	attr->flags = 0;
465	*_attr = attr;
466
467	return 0;
468}
469
470
471int
472pthread_rwlockattr_destroy(pthread_rwlockattr_t* _attr)
473{
474	pthread_rwlockattr* attr = *_attr;
475
476	free(attr);
477	return 0;
478}
479
480
481int
482pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* _attr, int* shared)
483{
484	pthread_rwlockattr* attr = *_attr;
485
486	*shared = (attr->flags & RWLOCK_FLAG_SHARED) != 0
487		? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
488	return 0;
489}
490
491
492int
493pthread_rwlockattr_setpshared(pthread_rwlockattr_t* _attr, int shared)
494{
495	pthread_rwlockattr* attr = *_attr;
496
497	if (shared == PTHREAD_PROCESS_SHARED)
498		attr->flags |= RWLOCK_FLAG_SHARED;
499	else
500		attr->flags &= ~RWLOCK_FLAG_SHARED;
501
502	return 0;
503}
504
505