/*	$OpenBSD: rthread_sync.c,v 1.6 2024/01/10 04:28:43 cheloha Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Mutexes and conditions - synchronization functions.
 */
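/*
 * Illustrative sketch of the usage pattern these functions implement
 * (not part of this file): the waiter holds the mutex and rechecks its
 * predicate in a loop, and pthread_cond_wait() re-locks the mutex
 * before returning.
 *
 *	pthread_mutex_lock(&m);
 *	while (!ready)
 *		pthread_cond_wait(&c, &m);
 *	pthread_mutex_unlock(&m);
 */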

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "rthread.h"
#include "cancel.h"		/* in libc/include */

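/*
 * Serializes the on-demand initialization performed when a statically
 * initialized mutex is first used (see _rthread_mutex_lock()).
 */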
static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;

/*
 * mutexen
 */
int
pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
{
	struct pthread_mutex *mutex;

	mutex = calloc(1, sizeof(*mutex));
	if (!mutex)
		return (errno);
	mutex->lock = _SPINLOCK_UNLOCKED;
	TAILQ_INIT(&mutex->lockers);
	if (attr == NULL) {
		mutex->type = PTHREAD_MUTEX_DEFAULT;
		mutex->prioceiling = -1;
	} else {
		mutex->type = (*attr)->ma_type;
		mutex->prioceiling = (*attr)->ma_protocol ==
		    PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
	}
	*mutexp = mutex;

	return (0);
}
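/*
 * DEF_STRONG() exposes the public symbol as a strong alias of the
 * hidden, namespaced implementation (see libc's namespace.h).
 */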
DEF_STRONG(pthread_mutex_init);

int
pthread_mutex_destroy(pthread_mutex_t *mutexp)
{
	struct pthread_mutex *mutex;

	assert(mutexp);
	mutex = (struct pthread_mutex *)*mutexp;
	if (mutex) {
		if (mutex->count || mutex->owner != NULL ||
		    !TAILQ_EMPTY(&mutex->lockers)) {
#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free(mutex);
		*mutexp = NULL;
	}
	return (0);
}
DEF_STRONG(pthread_mutex_destroy);

static int
_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abstime)
{
	struct pthread_mutex *mutex;
	pthread_t self = pthread_self();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization. Note: _thread_mutex_lock() in libc requires
	 * _rthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			ret = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (ret != 0)
			return (EINVAL);
	}
	mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
	_spinlock(&mutex->lock);
	if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
		assert(mutex->count == 0);
		mutex->owner = self;
	} else if (mutex->owner == self) {
		assert(mutex->count > 0);

		/* already owner?  handle recursive behavior */
		if (mutex->type != PTHREAD_MUTEX_RECURSIVE)
		{
			if (trywait ||
			    mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
				_spinunlock(&mutex->lock);
				return (trywait ? EBUSY : EDEADLK);
			}

			/* self-deadlock is disallowed by strict */
			if (mutex->type == PTHREAD_MUTEX_STRICT_NP &&
			    abstime == NULL)
				abort();

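			/*
			 * Note: __thrsleep() atomically releases
			 * mutex->lock before sleeping, and only returns
			 * EWOULDBLOCK once the timeout fires, with the
			 * spinlock already dropped, so the timeout
			 * return below needs no unlock.
			 */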
			/* self-deadlock, possibly until timeout */
			while (__thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL) != EWOULDBLOCK)
				_spinlock(&mutex->lock);
			return (ETIMEDOUT);
		}
		if (mutex->count == INT_MAX) {
			_spinunlock(&mutex->lock);
			return (EAGAIN);
		}
	} else if (trywait) {
		/* try failed */
		_spinunlock(&mutex->lock);
		return (EBUSY);
	} else {
		/* add to the wait queue and block until at the head */
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
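		/*
		 * __thrsleep() drops mutex->lock while we sleep, so it
		 * must be retaken after every wakeup before the owner
		 * and queue can be examined.
		 */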
		while (mutex->owner != self) {
			ret = __thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL);
			_spinlock(&mutex->lock);
			assert(mutex->owner != NULL);
			if (ret == EWOULDBLOCK) {
				if (mutex->owner == self)
					break;
				TAILQ_REMOVE(&mutex->lockers, self, waiting);
				_spinunlock(&mutex->lock);
				return (ETIMEDOUT);
			}
		}
	}

	mutex->count++;
	_spinunlock(&mutex->lock);

	return (0);
}

int
pthread_mutex_lock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, 0, NULL));
}
DEF_STRONG(pthread_mutex_lock);

int
pthread_mutex_trylock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, 1, NULL));
}

int
pthread_mutex_timedlock(pthread_mutex_t *p, const struct timespec *abstime)
{
	return (_rthread_mutex_lock(p, 0, abstime));
}

int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
	pthread_t self = pthread_self();
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self,
	    (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
		return (0);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
		    mutex->type == PTHREAD_MUTEX_RECURSIVE)
			return (EPERM);
		else {
			/*
			 * For mutex type NORMAL our undefined behavior for
			 * unlocking an unlocked mutex is to succeed without
			 * error.  All other undefined behaviors are to
			 * abort() immediately.
			 */
			if (mutex->owner == NULL &&
			    mutex->type == PTHREAD_MUTEX_NORMAL)
				return (0);
			else
				abort();
		}
	}

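	/* for a recursive mutex only the final unlock releases ownership */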
	if (--mutex->count == 0) {
		pthread_t next;

		_spinlock(&mutex->lock);
		mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
		if (next != NULL)
			TAILQ_REMOVE(&mutex->lockers, next, waiting);
		_spinunlock(&mutex->lock);
		if (next != NULL)
			__thrwakeup(next, 1);
	}

	return (0);
}
DEF_STRONG(pthread_mutex_unlock);

/*
 * condition variables
 */
int
pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
{
	pthread_cond_t cond;

	cond = calloc(1, sizeof(*cond));
	if (!cond)
		return (errno);
	cond->lock = _SPINLOCK_UNLOCKED;
	TAILQ_INIT(&cond->waiters);
	if (attr == NULL)
		cond->clock = CLOCK_REALTIME;
	else
		cond->clock = (*attr)->ca_clock;
	*condp = cond;

	return (0);
}
DEF_STRONG(pthread_cond_init);

int
pthread_cond_destroy(pthread_cond_t *condp)
{
	pthread_cond_t cond;

	assert(condp);
	cond = *condp;
	if (cond) {
		if (!TAILQ_EMPTY(&cond->waiters)) {
#define MSG "pthread_cond_destroy on condvar with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free(cond);
	}
	*condp = NULL;

	return (0);
}

int
pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
    const struct timespec *abstime)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int rv = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	if (abstime == NULL || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

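	/*
	 * From here on we hold mutex->lock (the spinlock) but no longer
	 * own the mutex itself; __thrsleep() below releases that
	 * spinlock atomically with going to sleep.
	 */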
	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, cond->clock, abstime,
		    &mutex->lock, &self->delayed_cancel);

		/*
		 * If abstime == NULL, then we're definitely waiting
		 * on the mutex instead of the condvar, and are
		 * just waiting for mutex ownership, regardless of
		 * why we woke up.
		 */
		if (abstime == NULL) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if ((error == EINTR || error == ECANCELED) &&
		    (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup, timeout, and cancellation) all mean that
		 * we won't be staying in the condvar queue and
		 * we'll no longer time out or be cancelable.
		 */
		abstime = NULL;
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if timeout or canceled, make note of that */
		if (error == EWOULDBLOCK)
			rv = ETIMEDOUT;
		else if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (rv);
}

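/*
 * pthread_cond_wait() mirrors pthread_cond_timedwait() above minus the
 * timeout bookkeeping: the sleep is untimed, so EWOULDBLOCK and
 * ETIMEDOUT cannot occur.
 */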
int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, 0, NULL, &mutex->lock,
		    &self->delayed_cancel);

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if ((error == EINTR || error == ECANCELED) &&
		    (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup and cancellation) all mean that we won't
		 * be staying in the condvar queue and we'll no
		 * longer be cancelable.
		 */
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if canceled, make note of that */
		if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (0);
}

int
pthread_cond_signal(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return (0);

	cond = *condp;
	_rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	assert(thread->blocking_cond == cond);
	TAILQ_REMOVE(&cond->waiters, thread, waiting);
	thread->blocking_cond = NULL;

	mutex = cond->mutex;
	assert(mutex != NULL);
	if (TAILQ_EMPTY(&cond->waiters))
		cond->mutex = NULL;

	/* link locks to prevent race with timedwait */
	_spinlock(&mutex->lock);
	_spinunlock(&cond->lock);

	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	if (wakeup)
		mutex->owner = thread;
	else
		TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting);
	_spinunlock(&mutex->lock);
	if (wakeup)
		__thrwakeup(thread, 1);

	return (0);
}

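/*
 * Broadcast requeues all waiters onto the mutex's locker list ("wait
 * morphing"): at most one thread is woken here, and the rest acquire
 * the mutex in turn, avoiding a thundering herd.
 */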
int
pthread_cond_broadcast(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	pthread_t p;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return (0);

	cond = *condp;
	_rthread_debug(5, "%p: cond_broadcast %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	mutex = cond->mutex;
	assert(mutex != NULL);

	/* walk the list, clearing the "blocked on condvar" pointer */
	p = thread;
	do
		p->blocking_cond = NULL;
	while ((p = TAILQ_NEXT(p, waiting)) != NULL);

	/*
	 * We want to transfer all the threads from the condvar's list
	 * to the mutex's list.  The TAILQ_* macros don't let us do that
	 * efficiently, so this is direct list surgery.  Pay attention!
	 */

	/* 1) attach the first thread to the end of the mutex's list */
	_spinlock(&mutex->lock);
	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	thread->waiting.tqe_prev = mutex->lockers.tqh_last;
	*(mutex->lockers.tqh_last) = thread;

	/* 2) fix up the end pointer for the mutex's list */
	mutex->lockers.tqh_last = cond->waiters.tqh_last;
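	/* cond->waiters is now stale; it is reset in step 3 below */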

	if (wakeup) {
		TAILQ_REMOVE(&mutex->lockers, thread, waiting);
		mutex->owner = thread;
		_spinunlock(&mutex->lock);
		__thrwakeup(thread, 1);
	} else
		_spinunlock(&mutex->lock);

	/* 3) reset the condvar's list and mutex pointer */
	TAILQ_INIT(&cond->waiters);
	assert(cond->mutex != NULL);
	cond->mutex = NULL;
	_spinunlock(&cond->lock);

	return (0);
}