/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Private thread definitions for the uthread kernel.
 *
 * $FreeBSD$
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Include files.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/kse.h>
#include <sched.h>
#include <ucontext.h>
#include <unistd.h>
#include <pthread.h>
#include <pthread_np.h>

#ifndef LIBTHREAD_DB
#include "lock.h"
#include "pthread_md.h"
#endif

/*
 * Evaluate the storage class specifier.
 */
#ifdef GLOBAL_PTHREAD_PRIVATE
#define SCLASS
#define SCLASS_PRESET(x...)	= x
#else
#define SCLASS			extern
#define SCLASS_PRESET(x...)
#endif
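
/*
 * Illustration (not itself part of this header's API): exactly one
 * translation unit defines GLOBAL_PTHREAD_PRIVATE before including
 * this file, so a declaration such as
 *
 *	SCLASS int _thr_debug_flags SCLASS_PRESET(0);
 *
 * expands there to "int _thr_debug_flags = 0;" (the definition) and
 * everywhere else to "extern int _thr_debug_flags;" (a reference).
 */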

/*
 * Kernel fatal error handler macro.
 */
#define PANIC(string)   _thr_exit(__FILE__, __LINE__, string)

/* Output debug messages like this: */
#ifdef STDOUT_FILENO
#define stdout_debug(...)	_thread_printf(STDOUT_FILENO, __VA_ARGS__)
#endif
#ifdef STDERR_FILENO
#define stderr_debug(...)	_thread_printf(STDERR_FILENO, __VA_ARGS__)
#endif

#define	DBG_MUTEX	0x0001
#define	DBG_SIG		0x0002
#define	DBG_INFO_DUMP	0x0004

#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {	\
	if (!(cond))			\
		PANIC(msg);		\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif

/*
 * State change macro without scheduling queue change:
 */
#define THR_SET_STATE(thrd, newstate) do {				\
	(thrd)->state = newstate;					\
	(thrd)->fname = __FILE__;					\
	(thrd)->lineno = __LINE__;					\
} while (0)

#define	TIMESPEC_ADD(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
		if ((dst)->tv_nsec >= 1000000000) {		\
			(dst)->tv_sec++;			\
			(dst)->tv_nsec -= 1000000000;		\
		}						\
	} while (0)

#define	TIMESPEC_SUB(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
		if ((dst)->tv_nsec < 0) {			\
			(dst)->tv_sec--;			\
			(dst)->tv_nsec += 1000000000;		\
		}						\
	} while (0)
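
/*
 * Usage sketch (hypothetical values): build an absolute wakeup time
 * 1.5 seconds from now, relying on the macro to keep tv_nsec
 * normalized to [0, 1000000000):
 *
 *	struct timespec now, timeout, abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &now);
 *	timeout.tv_sec = 1;
 *	timeout.tv_nsec = 500000000;
 *	TIMESPEC_ADD(&abstime, &now, &timeout);
 */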

/*
 * Priority queues.
 *
 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
 */
typedef struct pq_list {
	TAILQ_HEAD(, pthread)	pl_head; /* list of threads at this priority */
	TAILQ_ENTRY(pq_list)	pl_link; /* link for queue of priority lists */
	int			pl_prio; /* the priority of this list */
	int			pl_queued; /* is this in the priority queue */
} pq_list_t;

typedef struct pq_queue {
	TAILQ_HEAD(, pq_list)	 pq_queue; /* queue of priority lists */
	pq_list_t		*pq_lists; /* array of all priority lists */
	int			 pq_size;  /* number of priority lists */
#define	PQF_ACTIVE	0x0001
	int			 pq_flags;
	int			 pq_threads;
} pq_queue_t;
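
/*
 * Typical use from scheduling code (a sketch; kseg stands in for
 * some KSE group, and the _pq_* functions are declared near the
 * bottom of this file):
 *
 *	struct pthread *td;
 *
 *	td = _pq_first(&kseg->kg_schedq.sq_runq);
 *	if (td != NULL)
 *		_pq_remove(&kseg->kg_schedq.sq_runq, td);
 */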

/*
 * Each KSEG has a scheduling queue.  For now, threads that exist in their
 * own KSEG (system scope) will get a full priority queue.  In the future
 * this can be optimized for the single thread per KSEG case.
 */
struct sched_queue {
	pq_queue_t		sq_runq;
	TAILQ_HEAD(, pthread)	sq_waitq;	/* waiting in userland */
};

typedef struct kse_thr_mailbox *kse_critical_t;

struct kse_group;

#define	MAX_KSE_LOCKLEVEL	5
struct kse {
	/* -- location and order specific items for gdb -- */
	struct kcb		*k_kcb;
	struct pthread		*k_curthread;	/* current thread */
	struct kse_group	*k_kseg;	/* parent KSEG */
	struct sched_queue	*k_schedq;	/* scheduling queue */
	/* -- end of location and order specific items -- */
	TAILQ_ENTRY(kse)	k_qe;		/* KSE list link entry */
	TAILQ_ENTRY(kse)	k_kgqe;		/* KSEG's KSE list entry */
	/*
	 * Items that are only modified by the kse, or that otherwise
	 * don't need to be locked when accessed.
	 */
	struct lock		k_lock;
	struct lockuser		k_lockusers[MAX_KSE_LOCKLEVEL];
	int			k_locklevel;
	stack_t			k_stack;
	int			k_flags;
#define	KF_STARTED			0x0001	/* kernel kse created */
#define	KF_INITIALIZED			0x0002	/* initialized on 1st upcall */
#define	KF_TERMINATED			0x0004	/* kse is terminated */
#define	KF_IDLE				0x0008	/* kse is idle */
#define	KF_SWITCH			0x0010	/* thread switch in UTS */
	int			k_error;	/* syscall errno in critical */
	int			k_cpu;		/* CPU ID when bound */
	int			k_sigseqno;	/* signal buffered count */
};

#define	KSE_SET_IDLE(kse)	((kse)->k_flags |= KF_IDLE)
#define	KSE_CLEAR_IDLE(kse)	((kse)->k_flags &= ~KF_IDLE)
#define	KSE_IS_IDLE(kse)	(((kse)->k_flags & KF_IDLE) != 0)
#define	KSE_SET_SWITCH(kse)	((kse)->k_flags |= KF_SWITCH)
#define	KSE_CLEAR_SWITCH(kse)	((kse)->k_flags &= ~KF_SWITCH)
#define	KSE_IS_SWITCH(kse)	(((kse)->k_flags & KF_SWITCH) != 0)

/*
 * Each KSE group contains one or more KSEs in which threads can run.
 * At least for now, there is one scheduling queue per KSE group; KSEs
 * within the same KSE group compete for threads from the same scheduling
 * queue.  A scope system thread has one KSE in one KSE group; the group
 * does not use its scheduling queue.
 */
struct kse_group {
	TAILQ_HEAD(, kse)	kg_kseq;	/* list of KSEs in group */
	TAILQ_HEAD(, pthread)	kg_threadq;	/* list of threads in group */
	TAILQ_ENTRY(kse_group)  kg_qe;		/* link entry */
	struct sched_queue	kg_schedq;	/* scheduling queue */
	struct lock		kg_lock;
	int			kg_threadcount;	/* # of assigned threads */
	int			kg_ksecount;	/* # of assigned KSEs */
	int			kg_idle_kses;
	int			kg_flags;
#define	KGF_SINGLE_THREAD		0x0001	/* scope system kse group */
#define	KGF_SCHEDQ_INITED		0x0002	/* has an initialized schedq */
};

/*
 * Add/remove threads from a KSE's scheduling queue.
 * For now the scheduling queue is hung off the KSEG.
 */
#define	KSEG_THRQ_ADD(kseg, thr)			\
do {							\
	TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\
	(kseg)->kg_threadcount++;			\
} while (0)

#define	KSEG_THRQ_REMOVE(kseg, thr)			\
do {							\
	TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle);	\
	(kseg)->kg_threadcount--;			\
} while (0)

/*
 * Lock acquire and release for KSEs.
 */
#define	KSE_LOCK_ACQUIRE(kse, lck)					\
do {									\
	if ((kse)->k_locklevel < MAX_KSE_LOCKLEVEL) {			\
		(kse)->k_locklevel++;					\
		_lock_acquire((lck),					\
		    &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0);	\
	} else								\
		PANIC("Exceeded maximum lock level");			\
} while (0)

#define	KSE_LOCK_RELEASE(kse, lck)					\
do {									\
	if ((kse)->k_locklevel > 0) {					\
		_lock_release((lck),					\
		    &(kse)->k_lockusers[(kse)->k_locklevel - 1]);	\
		(kse)->k_locklevel--;					\
	}								\
} while (0)
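
/*
 * Acquire and release must nest: each acquire consumes the next
 * k_lockusers[] slot and each release returns the most recent one.
 * A sketch (some_lock is a stand-in for any struct lock):
 *
 *	KSE_LOCK_ACQUIRE(curkse, &some_lock);
 *	... critical section ...
 *	KSE_LOCK_RELEASE(curkse, &some_lock);
 */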

/*
 * Lock our own KSEG.
 */
#define	KSE_LOCK(curkse)		\
	KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock)
#define	KSE_UNLOCK(curkse)		\
	KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock)

/*
 * Lock a potentially different KSEG.
 */
#define	KSE_SCHED_LOCK(curkse, kseg)	\
	KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock)
#define	KSE_SCHED_UNLOCK(curkse, kseg)	\
	KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock)

/*
 * Waiting queue manipulation macros (using pqe link):
 */
#define KSE_WAITQ_REMOVE(kse, thrd) \
do { \
	if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \
		TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \
		(thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \
	} \
} while (0)
#define KSE_WAITQ_INSERT(kse, thrd)	kse_waitq_insert(thrd)
#define	KSE_WAITQ_FIRST(kse)		TAILQ_FIRST(&(kse)->k_schedq->sq_waitq)

#define	KSE_WAKEUP(kse)		kse_wakeup(&(kse)->k_kcb->kcb_kmbx)

/*
 * TailQ initialization values.
 */
#define TAILQ_INITIALIZER	{ NULL, NULL }

/*
 * Lock initialization values.
 */
#define	LCK_INITIALIZER		{ NULL, NULL, LCK_DEFAULT }

struct pthread_mutex {
	/*
	 * Lock for accesses to this structure.
	 */
	struct lock			m_lock;
	enum pthread_mutextype		m_type;
	int				m_protocol;
	TAILQ_HEAD(mutex_head, pthread)	m_queue;
	struct pthread			*m_owner;
	long				m_flags;
	int				m_count;
	int				m_refcount;

	/*
	 * Used for priority inheritance and protection.
	 *
	 *   m_prio       - For priority inheritance, the highest active
	 *                  priority (threads locking the mutex inherit
	 *                  this priority).  For priority protection, the
	 *                  ceiling priority of this mutex.
	 *   m_saved_prio - The mutex owner's inherited priority before
	 *                  taking the mutex, restored when the owner
	 *                  unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;
};
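
/*
 * Worked example (hypothetical priorities): a thread running at
 * active priority 10 owns a PTHREAD_PRIO_INHERIT mutex, and a
 * priority 20 thread then blocks on it.  m_prio is raised to 20 and
 * the owner runs at 20 until it unlocks, at which point its
 * priority is restored using m_saved_prio (its inherited priority
 * from before it took the mutex).
 */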

/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE	0x01
#define MUTEX_FLAGS_INITED	0x02
#define MUTEX_FLAGS_BUSY	0x04

/*
 * Static mutex initialization values.
 */
#define PTHREAD_MUTEX_STATIC_INITIALIZER				\
	{ LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE,	\
	TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0,	\
	TAILQ_INITIALIZER }

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
	long			m_flags;
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }

/*
 * Condition variable definitions.
 */
enum pthread_cond_type {
	COND_TYPE_FAST,
	COND_TYPE_MAX
};

struct pthread_cond {
	/*
	 * Lock for accesses to this structure.
	 */
	struct lock			c_lock;
	enum pthread_cond_type		c_type;
	TAILQ_HEAD(cond_head, pthread)	c_queue;
	struct pthread_mutex		*c_mutex;
	long				c_flags;
	long				c_seqno;
};

struct pthread_cond_attr {
	enum pthread_cond_type	c_type;
	long			c_flags;
};

struct pthread_barrier {
	pthread_mutex_t	b_lock;
	pthread_cond_t	b_cond;
	int		b_count;
	int		b_waiters;
	int		b_generation;
};

struct pthread_barrierattr {
	int		pshared;
};

struct pthread_spinlock {
	volatile int	s_lock;
	pthread_t	s_owner;
};

/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02
#define COND_FLAGS_BUSY		0x04

/*
 * Static cond initialization values.
 */
#define PTHREAD_COND_STATIC_INITIALIZER				\
	{ LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER,	\
	NULL, NULL, 0, 0 }

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;
	void			(*routine) (void *);
	void			*routine_arg;
	int			onstack;
};

#define	THR_CLEANUP_PUSH(td, func, arg) {		\
	struct pthread_cleanup __cup;			\
							\
	__cup.routine = func;				\
	__cup.routine_arg = arg;			\
	__cup.onstack = 1;				\
	__cup.next = (td)->cleanup;			\
	(td)->cleanup = &__cup;

#define	THR_CLEANUP_POP(td, exec)			\
	(td)->cleanup = __cup.next;			\
	if ((exec) != 0)				\
		__cup.routine(__cup.routine_arg);	\
}
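
/*
 * Note the deliberately unbalanced braces: THR_CLEANUP_PUSH opens a
 * block that THR_CLEANUP_POP closes, so the two must always appear
 * as a pair in the same function.  A hypothetical use:
 *
 *	THR_CLEANUP_PUSH(curthread, free, buf);
 *	... code that might exit or be cancelled ...
 *	THR_CLEANUP_POP(curthread, 1);
 *
 * where popping with a non-zero argument runs free(buf).
 */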

struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};

struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	sched_interval;
	int	prio;
	int	suspend;
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
#define	THR_SIGNAL_THREAD	0x200	/* This is a signal thread */
	int	flags;
	void	*arg_attr;
	void	(*cleanup_attr) (void *);
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
};

/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
#define THR_STACK32_DEFAULT			(1 * 1024 * 1024)
#define THR_STACK64_DEFAULT			(2 * 1024 * 1024)

/*
 * Maximum size of the initial thread's stack.  This perhaps deserves to be
 * larger than the stacks of other threads, since many applications are
 * likely to run almost entirely on this stack.
 */
#define THR_STACK32_INITIAL			(2 * 1024 * 1024)
#define THR_STACK64_INITIAL			(4 * 1024 * 1024)

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define THR_DEFAULT_PRIORITY			15
#define THR_MIN_PRIORITY			0
#define THR_MAX_PRIORITY			31	/* 0x1F */
#define THR_SIGNAL_PRIORITY			32	/* 0x20 */
#define THR_RT_PRIORITY				64	/* 0x40 */
#define THR_FIRST_PRIORITY			THR_MIN_PRIORITY
#define THR_LAST_PRIORITY	\
	(THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
#define THR_BASE_PRIORITY(prio)	((prio) & THR_MAX_PRIORITY)
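
/*
 * Example (following from the values above): a thread with base
 * priority 15 that is having a signal delivered is queued at
 * 15 + THR_SIGNAL_PRIORITY = 47, and THR_BASE_PRIORITY(47), which
 * masks with 0x1F, recovers the original base priority of 15.
 */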

/*
 * Clock resolution in microseconds.
 */
#define CLOCK_RES_USEC				10000

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC				20000

/*
 * XXX - Define a thread-safe macro to get the current time of day
 *       which is updated at regular intervals by something.
 *
 * For now, we just make the system call to get the time.
 */
#define	KSE_GET_TOD(curkse, tsp) \
do {							\
	*tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday;	\
	if ((tsp)->tv_sec == 0)				\
		clock_gettime(CLOCK_REALTIME, tsp);	\
} while (0)

struct pthread_rwlockattr {
	int		pshared;
};

struct pthread_rwlock {
	pthread_mutex_t	lock;	/* monitor lock */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		state;	/* 0 = idle, >0 = # of readers, -1 = writer */
	int		blocked_writers;
};
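
/*
 * Example of the state encoding (a sketch, not normative): with two
 * readers holding the lock, state == 2; with a writer holding it,
 * state == -1.  A writer that finds readers active increments
 * blocked_writers and waits on write_signal, so the last reader out
 * knows which condition variable to signal.
 */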

/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_LOCKWAIT,
	PS_MUTEX_WAIT,
	PS_COND_WAIT,
	PS_SLEEP_WAIT,
	PS_SIGSUSPEND,
	PS_SIGWAIT,
	PS_JOIN,
	PS_SUSPENDED,
	PS_DEAD,
	PS_DEADLOCK,
	PS_STATE_MAX
};

struct sigwait_data {
	sigset_t	*waitset;
	siginfo_t	*siginfo;	/* used to save siginfo for sigwaitinfo() */
};

union pthread_wait_data {
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
	struct lock	*lock;
	struct sigwait_data *sigwait;
};

/*
 * Define a continuation routine that can be used to perform a
 * transfer of control:
 */
typedef void	(*thread_continuation_t) (void *);

/*
 * This stores a thread's state prior to running a signal handler.
 * It is used when a signal is delivered to a thread blocked in
 * userland.  If the signal handler returns normally, the thread's
 * state is restored from here.
 */
struct pthread_sigframe {
	int			psf_valid;
	int			psf_flags;
	int			psf_cancelflags;
	int			psf_interrupted;
	int			psf_timeout;
	int			psf_signo;
	enum pthread_state	psf_state;
	union pthread_wait_data psf_wait_data;
	struct timespec		psf_wakeup_time;
	sigset_t		psf_sigset;
	sigset_t		psf_sigmask;
	int			psf_seqno;
	thread_continuation_t	psf_continuation;
};

struct join_status {
	struct pthread	*thread;
	void		*ret;
	int		error;
};

struct pthread_specific_elem {
	void	*data;
	int	seqno;
};

typedef void (*const_key_destructor_t)(const void *);
typedef void (*key_destructor_t)(void *);

struct pthread_key {
	volatile int	allocated;
	volatile int	count;
	int		seqno;
	key_destructor_t destructor;
};

#define	MAX_THR_LOCKLEVEL	5
/*
 * Thread structure.
 */
struct pthread {
	/* Thread control block */
	struct tcb		*tcb;

	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid; /* for gdb */

	/* Queue entry for list of all threads: */
	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */
	TAILQ_ENTRY(pthread)	kle;	/* link for all threads in KSE/KSEG */

	/* Queue entry for GC lists: */
	TAILQ_ENTRY(pthread)	gcle;

	/* Hash queue entry */
	LIST_ENTRY(pthread)	hle;

	/*
	 * Lock for accesses to this thread structure.
	 */
	struct lock		lock;
	struct lockuser		lockusers[MAX_THR_LOCKLEVEL];
	int			locklevel;
	kse_critical_t		critical[MAX_KSE_LOCKLEVEL];
	struct kse		*kse;
	struct kse_group	*kseg;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct pthread_attr	attr;

	int			active;		/* thread running */
	int			blocked;	/* thread blocked in kernel */
	int			need_switchout;

	/*
	 * Used for tracking delivery of signal handlers.
	 */
	siginfo_t		*siginfo;
	thread_continuation_t	sigbackout;

	/*
	 * Cancelability flags - the lower 2 bits are used by cancel
	 * definitions in pthread.h.
	 */
#define THR_AT_CANCEL_POINT		0x0004
#define THR_CANCELLING			0x0008
#define THR_CANCEL_NEEDED		0x0010
	int			cancelflags;

	thread_continuation_t	continuation;

	/*
	 * The thread's base and pending signal masks.  The active
	 * signal mask is stored in the thread's context (in mailbox).
	 */
	sigset_t		sigmask;
	sigset_t		sigpend;
	sigset_t		*oldsigmask;
	volatile int		check_pending;
	int			refcount;

	/* Thread state: */
	enum pthread_state	state;
	volatile int		lock_switch;

	/*
	 * Number of microseconds accumulated by this thread when
	 * time slicing is active.
	 */
	long			slice_usec;

	/*
	 * Time to wake up thread.  This is used for sleeping threads and
	 * for any operation which may time out (such as select).
	 */
	struct timespec		wakeup_time;

	/* TRUE if operation has timed out. */
	int			timeout;

	/*
	 * Error variable used instead of errno.  The function __error()
	 * returns a pointer to this.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining to this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread		*joiner;
	struct join_status	join_status;

	/*
	 * The current thread can belong to only one scheduling queue at
	 * a time (ready or waiting queue).  It can also belong to:
	 *
	 *   o A queue of threads waiting for a mutex
	 *   o A queue of threads waiting for a condition variable
	 *
	 * It is possible for a thread to belong to more than one of the
	 * above queues if it is handling a signal.  A thread may only
	 * enter a mutex or condition variable queue when it is not
	 * being called from a signal handler.  If a thread is a member
	 * of one of these queues when a signal handler is invoked, it
	 * must be removed from the queue before invoking the handler
	 * and then added back to the queue after return from the handler.
	 *
	 * Use pqe for the scheduling queue link (both ready and waiting),
	 * sqe for synchronization (mutex, condition variable, and join)
	 * queue links, and qe for all other links.
	 */
	TAILQ_ENTRY(pthread)	pqe;	/* priority, wait queues link */
	TAILQ_ENTRY(pthread)	sqe;	/* synchronization queue link */

	/* Wait data. */
	union pthread_wait_data data;

	/*
	 * Set to TRUE if a blocking operation was
	 * interrupted by a signal:
	 */
	int			interrupted;

	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;

	/*
	 * Set to TRUE if this thread should yield after leaving a
	 * critical region to check for signals, messages, etc.
	 */
	int			critical_yield;

	int			sflags;
#define THR_FLAGS_IN_SYNCQ	0x0001

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define THR_FLAGS_PRIVATE	0x0001
#define THR_FLAGS_IN_WAITQ	0x0002	/* in waiting queue using pqe link */
#define THR_FLAGS_IN_RUNQ	0x0004	/* in run queue using pqe link */
#define	THR_FLAGS_EXITING	0x0008	/* thread is exiting */
#define	THR_FLAGS_SUSPENDED	0x0010	/* thread is suspended */

	/* Thread list flags; only set with thread list lock held. */
#define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
#define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
#define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
	int			tlflags;

	/*
	 * Base priority is the user-settable and -retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char			base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char			inherited_priority;

	/*
	 * Active priority is always the maximum of the thread's base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char			active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int			priority_mutex_count;

	/* Number of rwlock read locks held. */
	int			rdlock_count;

	/*
	 * Queue of currently owned mutexes.
	 */
	TAILQ_HEAD(, pthread_mutex)	mutexq;

	void				*ret;
	struct pthread_specific_elem	*specific;
	int				specific_data_count;

	/* Alternative stack for sigaltstack(). */
	stack_t				sigstk;

	/*
	 * Current locks bitmap for rtld.
	 */
	int	rtld_bits;

	/* Cleanup handlers linked list. */
	struct pthread_cleanup *cleanup;
	const char		*fname;	/* Ptr to source file name */
	int			lineno;	/* Source line number. */
};

/*
 * Critical regions can also be detected by looking at the thread's
 * current lock level.  Ensure these macros increment and decrement
 * the lock levels such that locks cannot be held with a lock level
 * of 0.
 */
#define	THR_IN_CRITICAL(thrd)					\
	(((thrd)->locklevel > 0) ||				\
	((thrd)->critical_count > 0))

#define	THR_YIELD_CHECK(thrd)					\
do {								\
	if (!THR_IN_CRITICAL(thrd)) {				\
		if (__predict_false(_libkse_debug))		\
			_thr_debug_check_yield(thrd);		\
		if ((thrd)->critical_yield != 0)		\
			_thr_sched_switch(thrd);		\
		if ((thrd)->check_pending != 0)			\
			_thr_sig_check_pending(thrd);		\
	}							\
} while (0)

#define	THR_LOCK_ACQUIRE(thrd, lck)				\
do {								\
	if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) {		\
		THR_DEACTIVATE_LAST_LOCK(thrd);			\
		(thrd)->locklevel++;				\
		_lock_acquire((lck),				\
		    &(thrd)->lockusers[(thrd)->locklevel - 1],	\
		    (thrd)->active_priority);			\
	} else							\
		PANIC("Exceeded maximum lock level");		\
} while (0)

#define	THR_LOCK_RELEASE(thrd, lck)				\
do {								\
	if ((thrd)->locklevel > 0) {				\
		_lock_release((lck),				\
		    &(thrd)->lockusers[(thrd)->locklevel - 1]);	\
		(thrd)->locklevel--;				\
		THR_ACTIVATE_LAST_LOCK(thrd);			\
		if ((thrd)->locklevel == 0)			\
			THR_YIELD_CHECK(thrd);			\
	}							\
} while (0)

#define THR_ACTIVATE_LAST_LOCK(thrd)					\
do {									\
	if ((thrd)->locklevel > 0)					\
		_lockuser_setactive(					\
		    &(thrd)->lockusers[(thrd)->locklevel - 1], 1);	\
} while (0)

#define	THR_DEACTIVATE_LAST_LOCK(thrd)					\
do {									\
	if ((thrd)->locklevel > 0)					\
		_lockuser_setactive(					\
		    &(thrd)->lockusers[(thrd)->locklevel - 1], 0);	\
} while (0)

/*
 * For now, threads will have their own lock separate from their
 * KSE scheduling lock.
 */
#define	THR_LOCK(thr)			THR_LOCK_ACQUIRE(thr, &(thr)->lock)
#define	THR_UNLOCK(thr)			THR_LOCK_RELEASE(thr, &(thr)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)
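
/*
 * Example (a sketch, with td standing in for some other thread's
 * handle): locking another thread's structure while inspecting it:
 *
 *	THR_THREAD_LOCK(curthread, td);
 *	... read or update td's fields ...
 *	THR_THREAD_UNLOCK(curthread, td);
 */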

/*
 * Priority queue manipulation macros (using pqe link).  We use
 * the thread's kseg link instead of the kse link because a thread
 * does not (currently) have a statically assigned kse.
 */
#define THR_RUNQ_INSERT_HEAD(thrd)	\
	_pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_INSERT_TAIL(thrd)	\
	_pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_REMOVE(thrd)		\
	_pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)

/*
 * Macros to insert/remove threads into the all-thread list and
 * the GC list.
 */
#define	THR_LIST_ADD(thrd) do {					\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
		_thr_hash_add(thrd);				\
		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_LIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
		_thr_hash_remove(thrd);				\
		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_GCLIST_ADD(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
		_gc_count++;					\
	}							\
} while (0)
#define	THR_GCLIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
		_gc_count--;					\
	}							\
} while (0)

#define GC_NEEDED()	(atomic_load_acq_int(&_gc_count) >= 5)

/*
 * Locking the scheduling queue for another thread uses that thread's
 * KSEG lock.
 */
#define	THR_SCHED_LOCK(curthr, thr) do {		\
	(curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \
	(curthr)->locklevel++;				\
	KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg);	\
} while (0)

#define	THR_SCHED_UNLOCK(curthr, thr) do {		\
	KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg);	\
	(curthr)->locklevel--;				\
	_kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
} while (0)
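
/*
 * These must be used as a strict pair: the critical-region cookie
 * that THR_SCHED_LOCK saves in critical[locklevel] is the one that
 * THR_SCHED_UNLOCK later hands back to _kse_critical_leave().
 * A sketch (td standing in for the thread being examined):
 *
 *	THR_SCHED_LOCK(curthread, td);
 *	... examine or modify td's scheduling state ...
 *	THR_SCHED_UNLOCK(curthread, td);
 */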

/* Take the scheduling lock with the intent to call the scheduler. */
#define	THR_LOCK_SWITCH(curthr) do {			\
	(void)_kse_critical_enter();			\
	KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg);	\
} while (0)
#define	THR_UNLOCK_SWITCH(curthr) do {			\
	KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
} while (0)

#define	THR_CRITICAL_ENTER(thr)		(thr)->critical_count++
#define	THR_CRITICAL_LEAVE(thr)	do {		\
	(thr)->critical_count--;		\
	if (((thr)->critical_yield != 0) &&	\
	    ((thr)->critical_count == 0)) {	\
		(thr)->critical_yield = 0;	\
		_thr_sched_switch(thr);		\
	}					\
} while (0)

#define	THR_IS_ACTIVE(thrd) \
	(((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)))

#define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

#define	THR_IS_SUSPENDED(thrd) \
	(((thrd)->state == PS_SUSPENDED) || \
	(((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))
#define	THR_IS_EXITING(thrd)	(((thrd)->flags & THR_FLAGS_EXITING) != 0)
#define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \
	TMDF_SUSPEND) == 0)

extern int __isthreaded;

static inline int
_kse_isthreaded(void)
{
	return (__isthreaded != 0);
}

/*
 * Global variables for the pthread kernel.
 */

SCLASS void		*_usrstack	SCLASS_PRESET(NULL);
SCLASS struct kse	*_kse_initial	SCLASS_PRESET(NULL);
SCLASS struct pthread	*_thr_initial	SCLASS_PRESET(NULL);
/* For debugger */
SCLASS int		_libkse_debug		SCLASS_PRESET(0);
SCLASS int		_thread_activated	SCLASS_PRESET(0);
SCLASS int		_thread_scope_system	SCLASS_PRESET(0);

/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread)	_thread_list
    SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));

/* List of threads needing GC: */
SCLASS TAILQ_HEAD(, pthread)	_thread_gc_list
    SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));

SCLASS int	_thread_active_threads	SCLASS_PRESET(1);

SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
SCLASS pthread_mutex_t		_thr_atfork_mutex;

/* Default thread attributes: */
SCLASS struct pthread_attr _pthread_attr_default
    SCLASS_PRESET({
	SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
	THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
	NULL, NULL, /* stacksize */0, /* guardsize */0
    });

/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
    SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0});

/* Default condition variable attributes: */
SCLASS struct pthread_cond_attr _pthread_condattr_default
    SCLASS_PRESET({COND_TYPE_FAST, 0});

/* Clock resolution in usec. */
SCLASS int		_clock_res_usec		SCLASS_PRESET(CLOCK_RES_USEC);

/* Array of signal actions for this process: */
SCLASS struct sigaction	_thread_sigact[_SIG_MAXSIG];

/*
 * Lock for above count of dummy handlers and for the process signal
 * mask and pending signal sets.
 */
SCLASS struct lock	_thread_signal_lock;

/* Pending signals and mask for this process: */
SCLASS sigset_t		_thr_proc_sigpending;
SCLASS siginfo_t	_thr_proc_siginfo[_SIG_MAXSIG];

SCLASS pid_t		_thr_pid		SCLASS_PRESET(0);

/* Garbage collector lock. */
SCLASS struct lock	_gc_lock;
SCLASS int		_gc_check		SCLASS_PRESET(0);
SCLASS int		_gc_count		SCLASS_PRESET(0);

SCLASS struct lock	_mutex_static_lock;
SCLASS struct lock	_rwlock_static_lock;
SCLASS struct lock	_keytable_lock;
SCLASS struct lock	_thread_list_lock;
SCLASS size_t		_thr_guard_default;
SCLASS size_t		_thr_stack_default;
SCLASS size_t		_thr_stack_initial;
SCLASS int		_thr_page_size;
SCLASS pthread_t	_thr_sig_daemon;
SCLASS int		_thr_debug_flags	SCLASS_PRESET(0);

/* Undefine the storage class and preset specifiers: */
#undef  SCLASS
#undef	SCLASS_PRESET

/*
 * Function prototype definitions.
 */
__BEGIN_DECLS
int	_cond_reinit(pthread_cond_t *);
struct kse *_kse_alloc(struct pthread *, int sys_scope);
kse_critical_t _kse_critical_enter(void);
void	_kse_critical_leave(kse_critical_t);
int	_kse_in_critical(void);
void	_kse_free(struct pthread *, struct kse *);
void	_kse_init(void);
struct kse_group *_kseg_alloc(struct pthread *);
void	_kse_lock_wait(struct lock *, struct lockuser *lu);
void	_kse_lock_wakeup(struct lock *, struct lockuser *lu);
void	_kse_single_thread(struct pthread *);
int	_kse_setthreaded(int);
void	_kseg_free(struct kse_group *);
int	_mutex_cv_lock(pthread_mutex_t *);
int	_mutex_cv_unlock(pthread_mutex_t *);
void	_mutex_notify_priochange(struct pthread *, struct pthread *, int);
int	_mutex_reinit(struct pthread_mutex *);
void	_mutex_unlock_private(struct pthread *);
void	_libpthread_init(struct pthread *);
int	_pq_alloc(struct pq_queue *, int, int);
void	_pq_free(struct pq_queue *);
int	_pq_init(struct pq_queue *);
void	_pq_remove(struct pq_queue *pq, struct pthread *);
void	_pq_insert_head(struct pq_queue *pq, struct pthread *);
void	_pq_insert_tail(struct pq_queue *pq, struct pthread *);
struct pthread *_pq_first(struct pq_queue *pq);
struct pthread *_pq_first_debug(struct pq_queue *pq);
void	*_pthread_getspecific(pthread_key_t);
int	_pthread_key_create(pthread_key_t *, void (*) (void *));
int	_pthread_key_delete(pthread_key_t);
int	_pthread_mutex_destroy(pthread_mutex_t *);
int	_pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
int	_pthread_mutex_lock(pthread_mutex_t *);
int	_pthread_mutex_trylock(pthread_mutex_t *);
int	_pthread_mutex_unlock(pthread_mutex_t *);
int	_pthread_mutexattr_init(pthread_mutexattr_t *);
int	_pthread_mutexattr_destroy(pthread_mutexattr_t *);
int	_pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int	_pthread_once(pthread_once_t *, void (*) (void));
int	_pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *);
int	_pthread_rwlock_destroy(pthread_rwlock_t *);
struct pthread *_pthread_self(void);
int	_pthread_setspecific(pthread_key_t, const void *);
void	_pthread_yield(void);
void	_pthread_cleanup_push(void (*routine) (void *), void *routine_arg);
void	_pthread_cleanup_pop(int execute);
struct pthread *_thr_alloc(struct pthread *);
void	_thr_exit(const char *, int, const char *) __dead2;
void	_thr_exit_cleanup(void);
void	_thr_lock_wait(struct lock *lock, struct lockuser *lu);
void	_thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
void	_thr_mutex_reinit(pthread_mutex_t *);
int	_thr_ref_add(struct pthread *, struct pthread *, int);
void	_thr_ref_delete(struct pthread *, struct pthread *);
void	_thr_rtld_init(void);
void	_thr_rtld_fini(void);
int	_thr_schedule_add(struct pthread *, struct pthread *);
void	_thr_schedule_remove(struct pthread *, struct pthread *);
void	_thr_setrunnable(struct pthread *curthread, struct pthread *thread);
struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread);
struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *);
void	_thr_sig_dispatch(struct kse *, int, siginfo_t *);
int	_thr_stack_alloc(struct pthread_attr *);
void	_thr_stack_free(struct pthread_attr *);
void	_thr_free(struct pthread *, struct pthread *);
void	_thr_gc(struct pthread *);
void	_thr_panic_exit(char *, int, char *);
void	_thread_cleanupspecific(void);
void	_thread_dump_info(void);
void	_thread_printf(int, const char *, ...);
void	_thr_sched_switch(struct pthread *);
void	_thr_sched_switch_unlocked(struct pthread *);
void	_thr_set_timeout(const struct timespec *);
void	_thr_seterrno(struct pthread *, int);
void	_thr_sig_handler(int, siginfo_t *, void *);
void	_thr_sig_check_pending(struct pthread *);
void	_thr_sig_rundown(struct pthread *, ucontext_t *);
void	_thr_sig_send(struct pthread *pthread, int sig);
void	_thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
void	_thr_spinlock_init(void);
void	_thr_cancel_enter(struct pthread *);
void	_thr_cancel_leave(struct pthread *, int);
int	_thr_setconcurrency(int new_level);
int	_thr_setmaxconcurrency(void);
void	_thr_critical_enter(struct pthread *);
void	_thr_critical_leave(struct pthread *);
int	_thr_start_sig_daemon(void);
int	_thr_getprocsig(int sig, siginfo_t *siginfo);
int	_thr_getprocsig_unlocked(int sig, siginfo_t *siginfo);
void	_thr_signal_init(void);
void	_thr_signal_deinit(void);
void	_thr_hash_add(struct pthread *);
void	_thr_hash_remove(struct pthread *);
struct pthread *_thr_hash_find(struct pthread *);
void	_thr_finish_cancellation(void *arg);
int	_thr_sigonstack(void *sp);
void	_thr_debug_check_yield(struct pthread *);

/*
 * Aliases for _pthread functions.  Should be called instead of the
 * originals if PLT relocation is unwanted at runtime.
 */
int	_thr_cond_broadcast(pthread_cond_t *);
int	_thr_cond_signal(pthread_cond_t *);
int	_thr_cond_wait(pthread_cond_t *, pthread_mutex_t *);
int	_thr_mutex_lock(pthread_mutex_t *);
int	_thr_mutex_unlock(pthread_mutex_t *);
int	_thr_rwlock_rdlock(pthread_rwlock_t *);
int	_thr_rwlock_wrlock(pthread_rwlock_t *);
int	_thr_rwlock_unlock(pthread_rwlock_t *);

/* #include <sys/aio.h> */
#ifdef _SYS_AIO_H_
int	__sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
#endif

/* #include <fcntl.h> */
#ifdef  _SYS_FCNTL_H_
int     __sys_fcntl(int, int, ...);
int     __sys_open(const char *, int, ...);
#endif

/* #include <sys/ioctl.h> */
#ifdef _SYS_IOCTL_H_
int	__sys_ioctl(int, unsigned long, ...);
#endif

/* #include <sched.h> */
#ifdef	_SCHED_H_
int	__sys_sched_yield(void);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int	__sys_kill(pid_t, int);
int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
int     __sys_sigpending(sigset_t *);
int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
int     __sys_sigsuspend(const sigset_t *);
int     __sys_sigreturn(ucontext_t *);
int     __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
#endif

/* #include <sys/socket.h> */
#ifdef _SYS_SOCKET_H_
int	__sys_accept(int, struct sockaddr *, socklen_t *);
int	__sys_connect(int, const struct sockaddr *, socklen_t);
int	__sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
	    off_t *, int);
#endif

/* #include <sys/uio.h> */
#ifdef  _SYS_UIO_H_
ssize_t __sys_readv(int, const struct iovec *, int);
ssize_t __sys_writev(int, const struct iovec *, int);
#endif

/* #include <time.h> */
#ifdef	_TIME_H_
int	__sys_nanosleep(const struct timespec *, struct timespec *);
#endif

/* #include <unistd.h> */
#ifdef  _UNISTD_H_
int     __sys_close(int);
int     __sys_execve(const char *, char * const *, char * const *);
int	__sys_fork(void);
int	__sys_fsync(int);
pid_t	__sys_getpid(void);
int     __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
ssize_t __sys_read(int, void *, size_t);
ssize_t __sys_write(int, const void *, size_t);
void	__sys_exit(int);
int	__sys_sigwait(const sigset_t *, int *);
int	__sys_sigtimedwait(const sigset_t *, siginfo_t *, const struct timespec *);
#endif

/* #include <poll.h> */
#ifdef _SYS_POLL_H_
int	__sys_poll(struct pollfd *, unsigned, int);
#endif

/* #include <sys/mman.h> */
#ifdef _SYS_MMAN_H_
int	__sys_msync(void *, size_t, int);
#endif

static __inline int
_thr_dump_enabled(void)
{

	return ((_thr_debug_flags & DBG_INFO_DUMP) != 0);
}

#endif  /* !_THR_PRIVATE_H */