thr_private.h revision 123975
1/*
2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 *    must display the following acknowledgement:
15 *	This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * Private thread definitions for the uthread kernel.
33 *
34 * $FreeBSD: head/lib/libkse/thread/thr_private.h 123975 2003-12-29 23:33:51Z davidxu $
35 */
36
37#ifndef _THR_PRIVATE_H
38#define _THR_PRIVATE_H
39
40/*
41 * Include files.
42 */
43#include <setjmp.h>
44#include <signal.h>
45#include <stdio.h>
46#include <sys/queue.h>
47#include <sys/types.h>
48#include <sys/time.h>
49#include <sys/cdefs.h>
50#include <sys/kse.h>
51#include <sched.h>
52#include <ucontext.h>
53#include <unistd.h>
54#include <pthread.h>
55#include <pthread_np.h>
56
57#include "lock.h"
58#include "pthread_md.h"
59
/*
 * Evaluate the storage class specifier.
 * Exactly one translation unit defines GLOBAL_PTHREAD_PRIVATE; that unit
 * owns the definitions (with initializers) of the globals declared at the
 * bottom of this header, while every other unit sees extern declarations.
 */
#ifdef GLOBAL_PTHREAD_PRIVATE
#define SCLASS
#define SCLASS_PRESET(x...)	= x
#else
#define SCLASS			extern
#define SCLASS_PRESET(x...)
#endif

/*
 * Kernel fatal error handler macro.
 * Captures the source file and line where the fatal condition was hit.
 */
#define PANIC(string)   _thr_exit(__FILE__,__LINE__,string)
75
76
/*
 * Output debug messages like this:
 * (stderr_debug previously wrote to STDOUT_FILENO by copy-paste error;
 * it now goes to the standard error descriptor as its name promises.)
 */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)
80
/* Debug-category bits (e.g. for _thr_debug_flags). */
#define	DBG_MUTEX	0x0001
#define	DBG_SIG		0x0002

/*
 * Invariant check: compiled away unless the library is built with
 * _PTHREADS_INVARIANTS; a failed condition panics the thread kernel.
 */
#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {	\
	if (!(cond))			\
		PANIC(msg);		\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif
92
/*
 * State change macro without scheduling queue change:
 * also records the source location of the change for debugging.
 */
#define THR_SET_STATE(thrd, newstate) do {				\
	(thrd)->state = newstate;					\
	(thrd)->fname = __FILE__;					\
	(thrd)->lineno = __LINE__;					\
} while (0)
101
102
/*
 * Normalized timespec arithmetic: *dst = *src +/- *val.
 * Inputs are assumed normalized (0 <= tv_nsec < 1000000000), so a single
 * carry/borrow step suffices.  Note the carry test must be ">=": a sum of
 * exactly 1000000000 nsec is one full second and must be normalized too
 * (the old "> 1000000000" test left tv_nsec == 1000000000 unadjusted).
 */
#define	TIMESPEC_ADD(dst, src, val)				\
	do { 							\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
		if ((dst)->tv_nsec >= 1000000000) {		\
			(dst)->tv_sec++;			\
			(dst)->tv_nsec -= 1000000000;		\
		}						\
	} while (0)

#define	TIMESPEC_SUB(dst, src, val)				\
	do { 							\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
		if ((dst)->tv_nsec < 0) {			\
			(dst)->tv_sec--;			\
			(dst)->tv_nsec += 1000000000;		\
		}						\
	} while (0)
122
/*
 * Priority queues.
 *
 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
 */
typedef struct pq_list {
	TAILQ_HEAD(, pthread)	pl_head; /* list of threads at this priority */
	TAILQ_ENTRY(pq_list)	pl_link; /* link for queue of priority lists */
	int			pl_prio; /* the priority of this list */
	int			pl_queued; /* is this in the priority queue */
} pq_list_t;

/* A priority queue: one pq_list per priority level. */
typedef struct pq_queue {
	TAILQ_HEAD(, pq_list)	 pq_queue; /* queue of priority lists */
	pq_list_t		*pq_lists; /* array of all priority lists */
	int			 pq_size;  /* number of priority lists */
#define	PQF_ACTIVE	0x0001
	int			 pq_flags; /* PQF_* bits */
	int			 pq_threads;
} pq_queue_t;
143
/*
 * Each KSEG has a scheduling queue.  For now, threads that exist in their
 * own KSEG (system scope) will get a full priority queue.  In the future
 * this can be optimized for the single thread per KSEG case.
 */
struct sched_queue {
	pq_queue_t		sq_runq;
	TAILQ_HEAD(, pthread)	sq_waitq;	/* waiting in userland */
};

/* Opaque token returned by _kse_critical_enter(), consumed by _leave(). */
typedef struct kse_thr_mailbox *kse_critical_t;

struct kse_group;
157
#define	MAX_KSE_LOCKLEVEL	5
struct kse {
	/* -- location and order specific items for gdb -- */
	struct kcb		*k_kcb;
	struct pthread		*k_curthread;	/* current thread */
	struct kse_group	*k_kseg;	/* parent KSEG */
	struct sched_queue	*k_schedq;	/* scheduling queue */
	/* -- end of location and order specific items -- */
	TAILQ_ENTRY(kse)	k_qe;		/* KSE list link entry */
	TAILQ_ENTRY(kse)	k_kgqe;		/* KSEG's KSE list entry */
	/*
	 * Items that are only modified by the kse, or that otherwise
	 * don't need to be locked when accessed
	 */
	struct lock		k_lock;
	struct lockuser		k_lockusers[MAX_KSE_LOCKLEVEL];
	int			k_locklevel;	/* # of k_lockusers in use */
	stack_t			k_stack;
	int			k_flags;	/* KF_* bits */
#define	KF_STARTED			0x0001	/* kernel kse created */
#define	KF_INITIALIZED			0x0002	/* initialized on 1st upcall */
#define	KF_TERMINATED			0x0004	/* kse is terminated */
#define	KF_IDLE				0x0008	/* kse is idle */
#define	KF_SWITCH			0x0010	/* thread switch in UTS */
	int			k_error;	/* syscall errno in critical */
	int			k_cpu;		/* CPU ID when bound */
	int			k_sigseqno;	/* signal buffered count */
};

/* Set/clear/test helpers for the KF_IDLE and KF_SWITCH bits of k_flags. */
#define	KSE_SET_IDLE(kse)	((kse)->k_flags |= KF_IDLE)
#define	KSE_CLEAR_IDLE(kse)	((kse)->k_flags &= ~KF_IDLE)
#define	KSE_IS_IDLE(kse)	(((kse)->k_flags & KF_IDLE) != 0)
#define	KSE_SET_SWITCH(kse)	((kse)->k_flags |= KF_SWITCH)
#define	KSE_CLEAR_SWITCH(kse)	((kse)->k_flags &= ~KF_SWITCH)
#define	KSE_IS_SWITCH(kse)	(((kse)->k_flags & KF_SWITCH) != 0)
193
/*
 * Each KSE group contains one or more KSEs in which threads can run.
 * At least for now, there is one scheduling queue per KSE group; KSEs
 * within the same KSE group compete for threads from the same scheduling
 * queue.  A scope system thread has one KSE in one KSE group; the group
 * does not use its scheduling queue.
 */
struct kse_group {
	TAILQ_HEAD(, kse)	kg_kseq;	/* list of KSEs in group */
	TAILQ_HEAD(, pthread)	kg_threadq;	/* list of threads in group */
	TAILQ_ENTRY(kse_group)  kg_qe;		/* link entry */
	struct sched_queue	kg_schedq;	/* scheduling queue */
	struct lock		kg_lock;
	int			kg_threadcount;	/* # of assigned threads */
	int			kg_ksecount;	/* # of assigned KSEs */
	int			kg_idle_kses;
	int			kg_flags;	/* KGF_* bits */
#define	KGF_SINGLE_THREAD		0x0001	/* scope system kse group */
#define	KGF_SCHEDQ_INITED		0x0002	/* has an initialized schedq */
};

/*
 * Add/remove threads from a KSE's scheduling queue.
 * For now the scheduling queue is hung off the KSEG.
 * Both macros keep kg_threadcount in step with the kg_threadq list.
 */
#define	KSEG_THRQ_ADD(kseg, thr)			\
do {							\
	TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\
	(kseg)->kg_threadcount++;			\
} while (0)

#define	KSEG_THRQ_REMOVE(kseg, thr)			\
do {							\
	TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle);	\
	(kseg)->kg_threadcount--;			\
} while (0)
230
231
/*
 * Lock acquire and release for KSEs.
 * Acquire claims the next free k_lockusers slot (panics if all
 * MAX_KSE_LOCKLEVEL slots are in use); release frees the most
 * recently claimed slot, so acquires/releases must nest.
 */
#define	KSE_LOCK_ACQUIRE(kse, lck)					\
do {									\
	if ((kse)->k_locklevel < MAX_KSE_LOCKLEVEL) {			\
		(kse)->k_locklevel++;					\
		_lock_acquire((lck),					\
		    &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0);	\
	}								\
	else 								\
		PANIC("Exceeded maximum lock level");			\
} while (0)

#define	KSE_LOCK_RELEASE(kse, lck)					\
do {									\
	if ((kse)->k_locklevel > 0) {					\
		_lock_release((lck),					\
		    &(kse)->k_lockusers[(kse)->k_locklevel - 1]);	\
		(kse)->k_locklevel--;					\
	}								\
} while (0)
254
/*
 * Lock our own KSEG.
 */
#define	KSE_LOCK(curkse)		\
	KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock)
#define	KSE_UNLOCK(curkse)		\
	KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock)

/*
 * Lock a potentially different KSEG.
 */
#define	KSE_SCHED_LOCK(curkse, kseg)	\
	KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock)
#define	KSE_SCHED_UNLOCK(curkse, kseg)	\
	KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock)

/*
 * Waiting queue manipulation macros (using pqe link):
 * removal clears THR_FLAGS_IN_WAITQ and is a no-op when the thread
 * is not currently on the wait queue.
 */
#define KSE_WAITQ_REMOVE(kse, thrd) \
do { \
	if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \
		TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \
		(thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \
	} \
} while (0)
#define KSE_WAITQ_INSERT(kse, thrd)	kse_waitq_insert(thrd)
#define	KSE_WAITQ_FIRST(kse)		TAILQ_FIRST(&(kse)->k_schedq->sq_waitq)

#define	KSE_WAKEUP(kse)		kse_wakeup(&(kse)->k_kcb->kcb_kmbx)

/*
 * TailQ initialization values.
 */
#define TAILQ_INITIALIZER	{ NULL, NULL }

/*
 * lock initialization values.
 */
#define	LCK_INITIALIZER		{ NULL, NULL, LCK_DEFAULT }
295
struct pthread_mutex {
	/*
	 * Lock for accesses to this structure.
	 */
	struct lock			m_lock;
	enum pthread_mutextype		m_type;
	int				m_protocol;	/* PTHREAD_PRIO_* */
	TAILQ_HEAD(mutex_head, pthread)	m_queue;	/* waiting threads */
	struct pthread			*m_owner;
	long				m_flags;	/* MUTEX_FLAGS_* */
	int				m_count;
	int				m_refcount;

	/*
	 * Used for priority inheritance and protection.
	 *
	 *   m_prio       - For priority inheritance, the highest active
	 *                  priority (threads locking the mutex inherit
	 *                  this priority).  For priority protection, the
	 *                  ceiling priority of this mutex.
	 *   m_saved_prio - mutex owners inherited priority before
	 *                  taking the mutex, restored when the owner
	 *                  unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;
};

/*
 * Flags for mutexes.
 */
#define MUTEX_FLAGS_PRIVATE	0x01
#define MUTEX_FLAGS_INITED	0x02
#define MUTEX_FLAGS_BUSY	0x04

/*
 * Static mutex initialization values.
 * (One initializer per field, in struct pthread_mutex declaration order.)
 */
#define PTHREAD_MUTEX_STATIC_INITIALIZER				\
	{ LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE,	\
	TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0,	\
	TAILQ_INITIALIZER }

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;	/* PTHREAD_PRIO_* */
	int			m_ceiling;
	long			m_flags;	/* MUTEX_FLAGS_* */
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
353
/*
 * Condition variable definitions.
 */
enum pthread_cond_type {
	COND_TYPE_FAST,
	COND_TYPE_MAX	/* sentinel: count of types, not a real type */
};

struct pthread_cond {
	/*
	 * Lock for accesses to this structure.
	 */
	struct lock			c_lock;
	enum pthread_cond_type		c_type;
	TAILQ_HEAD(cond_head, pthread)	c_queue;	/* waiting threads */
	struct pthread_mutex		*c_mutex;
	long				c_flags;	/* COND_FLAGS_* */
	long				c_seqno;
};

struct pthread_cond_attr {
	enum pthread_cond_type	c_type;
	long			c_flags;	/* COND_FLAGS_* */
};

/* Barrier built from a mutex and a condition variable. */
struct pthread_barrier {
	pthread_mutex_t	b_lock;
	pthread_cond_t	b_cond;
	int		b_count;
	int		b_waiters;
	int		b_generation;
};

struct pthread_barrierattr {
	int		pshared;
};

struct pthread_spinlock {
	volatile int	s_lock;
	pthread_t	s_owner;
};
395
/*
 * Flags for condition variables.
 */
#define COND_FLAGS_PRIVATE	0x01
#define COND_FLAGS_INITED	0x02
#define COND_FLAGS_BUSY		0x04

/*
 * Static cond initialization values: one initializer per field of
 * struct pthread_cond, in order (c_lock, c_type, c_queue, c_mutex,
 * c_flags, c_seqno).  The previous version carried a seventh, surplus
 * initializer, which is a constraint violation in C.
 */
#define PTHREAD_COND_STATIC_INITIALIZER				\
	{ LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER,	\
	NULL, 0, 0 }
409
/*
 * Semaphore definitions.
 */
struct sem {
#define	SEM_MAGIC	((u_int32_t) 0x09fa4012)
	u_int32_t	magic;		/* SEM_MAGIC while the semaphore is valid */
	pthread_mutex_t	lock;
	pthread_cond_t	gtzero;		/* presumably signaled when count > 0 — confirm in sem code */
	u_int32_t	count;
	u_int32_t	nwaiters;
};

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;	/* singly-linked stack of handlers */
	void			(*routine) ();
	void			*routine_arg;
};

/* One registration made via pthread_atfork(). */
struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};
437
/* Thread creation attributes. */
struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	sched_interval;
	int	prio;
	int	suspend;	/* THR_CREATE_RUNNING or THR_CREATE_SUSPENDED */
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
	int	flags;
	void	*arg_attr;
	void	(*cleanup_attr) ();
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
};
452
/*
 * Thread creation state attributes.
 */
#define THR_CREATE_RUNNING		0
#define THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
#define THR_STACK_DEFAULT			65536

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 */
#define THR_STACK_INITIAL			0x100000

/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define THR_DEFAULT_PRIORITY			15
#define THR_MIN_PRIORITY			0
#define THR_MAX_PRIORITY			31	/* 0x1F */
#define THR_SIGNAL_PRIORITY			32	/* 0x20 */
#define THR_RT_PRIORITY				64	/* 0x40 */
#define THR_FIRST_PRIORITY			THR_MIN_PRIORITY
#define THR_LAST_PRIORITY	\
	(THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
/* Mask off the SIGNAL/RT boost bits, leaving the 0-31 base priority. */
#define THR_BASE_PRIORITY(prio)	((prio) & THR_MAX_PRIORITY)

/*
 * Clock resolution in microseconds.
 */
#define CLOCK_RES_USEC				10000

/*
 * Time slice period in microseconds.
 */
#define TIMESLICE_USEC				20000
504
/*
 * XXX - Define a thread-safe macro to get the current time of day
 *       which is updated at regular intervals by something.
 *
 * For now, we just make the system call to get the time.
 *
 * (The tsp argument is now parenthesized at every use so the macro is
 * safe for any pointer-valued argument expression.)
 */
#define	KSE_GET_TOD(curkse, tsp) \
do {							\
	*(tsp) = (curkse)->k_kcb->kcb_kmbx.km_timeofday;	\
	if ((tsp)->tv_sec == 0)				\
		clock_gettime(CLOCK_REALTIME, (tsp));	\
} while (0)
517
struct pthread_rwlockattr {
	int		pshared;
};

/* Monitor-style rwlock: state and counters are guarded by 'lock'. */
struct pthread_rwlock {
	pthread_mutex_t	lock;	/* monitor lock */
	int		state;	/* 0 = idle  >0 = # of readers  -1 = writer */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		blocked_writers;
};
529
/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_LOCKWAIT,
	PS_MUTEX_WAIT,
	PS_COND_WAIT,
	PS_SLEEP_WAIT,
	PS_SIGSUSPEND,
	PS_SIGWAIT,
	PS_JOIN,
	PS_SUSPENDED,
	PS_DEAD,
	PS_DEADLOCK,
	PS_STATE_MAX	/* sentinel: count of states, not a real state */
};

/* Arguments saved for a thread blocked in sigwait()/sigwaitinfo(). */
struct sigwait_data {
	sigset_t	*waitset;
	siginfo_t	*siginfo;	/* used to save siginfo for sigwaitinfo() */
};

/*
 * What a blocked thread is waiting on; which member is valid is
 * presumably determined by the thread's pthread_state — confirm in
 * the scheduler code.
 */
union pthread_wait_data {
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
	struct lock	*lock;
	struct sigwait_data *sigwait;
};
559
/*
 * Define a continuation routine that can be used to perform a
 * transfer of control:
 */
typedef void	(*thread_continuation_t) (void *);

/*
 * This stores a thread's state prior to running a signal handler.
 * It is used when a signal is delivered to a thread blocked in
 * userland.  If the signal handler returns normally, the thread's
 * state is restored from here.
 */
struct pthread_sigframe {
	int			psf_valid;
	int			psf_flags;
	int			psf_interrupted;
	int			psf_timeout;
	int			psf_signo;
	enum pthread_state	psf_state;
	union pthread_wait_data psf_wait_data;
	struct timespec		psf_wakeup_time;
	sigset_t		psf_sigset;
	sigset_t		psf_sigmask;
	int			psf_seqno;
};

/* Result of a join operation on another thread. */
struct join_status {
	struct pthread	*thread;
	void		*ret;
	int		error;
};

/* One slot of thread-specific data (pthread_setspecific). */
struct pthread_specific_elem {
	const void	*data;
	int		seqno;
};
596
597
#define	MAX_THR_LOCKLEVEL	5
/*
 * Thread structure.
 */
struct pthread {
	/*
	 * Thread mailbox is first so it can be aligned properly.
	 */
	struct tcb		*tcb;

	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid; /* for gdb */

	/* Queue entry for list of all threads: */
	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */
	TAILQ_ENTRY(pthread)	kle;	/* link for all threads in KSE/KSEG */

	/* Queue entry for GC lists: */
	TAILQ_ENTRY(pthread)	gcle;

	/* Hash queue entry */
	LIST_ENTRY(pthread)	hle;

	/*
	 * Lock for accesses to this thread structure.
	 */
	struct lock		lock;
	struct lockuser		lockusers[MAX_THR_LOCKLEVEL];
	int			locklevel;	/* # of lockusers in use */
	kse_critical_t		critical[MAX_KSE_LOCKLEVEL];
	struct kse		*kse;
	struct kse_group	*kseg;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct pthread_attr	attr;

	int			active;		/* thread running */
	int			blocked;	/* thread blocked in kernel */
	int			need_switchout;

	/*
	 * Used for tracking delivery of signal handlers.
	 */
	struct pthread_sigframe	*curframe;
	siginfo_t		*siginfo;

	/*
	 * Cancelability flags - the lower 2 bits are used by cancel
	 * definitions in pthread.h
	 */
#define THR_AT_CANCEL_POINT		0x0004
#define THR_CANCELLING			0x0008
#define THR_CANCEL_NEEDED		0x0010
	int			cancelflags;

	thread_continuation_t	continuation;

	/*
	 * The thread's base and pending signal masks.  The active
	 * signal mask is stored in the thread's context (in mailbox).
	 */
	sigset_t		sigmask;
	sigset_t		sigpend;
	volatile int		check_pending;
	int			refcount;

	/* Thread state: */
	enum pthread_state	state;
	volatile int		lock_switch;

	/*
	 * Number of microseconds accumulated by this thread when
	 * time slicing is active.
	 */
	long			slice_usec;

	/*
	 * Time to wake up thread. This is used for sleeping threads and
	 * for any operation which may time out (such as select).
	 */
	struct timespec		wakeup_time;

	/* TRUE if operation has timed out. */
	int			timeout;

	/*
	 * Error variable used instead of errno. The function __error()
	 * returns a pointer to this.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining to this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread		*joiner;
	struct join_status	join_status;

	/*
	 * The current thread can belong to only one scheduling queue at
	 * a time (ready or waiting queue).  It can also belong to:
	 *
	 *   o A queue of threads waiting for a mutex
	 *   o A queue of threads waiting for a condition variable
	 *
	 * It is possible for a thread to belong to more than one of the
	 * above queues if it is handling a signal.  A thread may only
	 * enter a mutex or condition variable queue when it is not
	 * being called from a signal handler.  If a thread is a member
	 * of one of these queues when a signal handler is invoked, it
	 * must be removed from the queue before invoking the handler
	 * and then added back to the queue after return from the handler.
	 *
	 * Use pqe for the scheduling queue link (both ready and waiting),
	 * sqe for synchronization (mutex, condition variable, and join)
	 * queue links, and qe for all other links.
	 */
	TAILQ_ENTRY(pthread)	pqe;	/* priority, wait queues link */
	TAILQ_ENTRY(pthread)	sqe;	/* synchronization queue link */

	/* Wait data. */
	union pthread_wait_data data;

	/*
	 * Set to TRUE if a blocking operation was
	 * interrupted by a signal:
	 */
	int			interrupted;

	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;

	/*
	 * Set to TRUE if this thread should yield after leaving a
	 * critical region to check for signals, messages, etc.
	 */
	int			critical_yield;

	int			sflags;
#define THR_FLAGS_IN_SYNCQ	0x0001

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define THR_FLAGS_PRIVATE	0x0001
#define THR_FLAGS_IN_WAITQ	0x0002	/* in waiting queue using pqe link */
#define THR_FLAGS_IN_RUNQ	0x0004	/* in run queue using pqe link */
#define	THR_FLAGS_EXITING	0x0008	/* thread is exiting */
#define	THR_FLAGS_SUSPENDED	0x0010	/* thread is suspended */
#define	THR_FLAGS_GC_SAFE	0x0020	/* thread safe for cleaning */
#define	THR_FLAGS_IN_TDLIST	0x0040	/* thread in all thread list */
#define	THR_FLAGS_IN_GCLIST	0x0080	/* thread in gc list */
	/*
	 * Base priority is the user settable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char			base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char			inherited_priority;

	/*
	 * Active priority is always the maximum of the threads base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char			active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int			priority_mutex_count;

	/*
	 * Queue of currently owned mutexes.
	 */
	TAILQ_HEAD(, pthread_mutex)	mutexq;

	void				*ret;
	struct pthread_specific_elem	*specific;
	int				specific_data_count;

	/* Alternative stack for sigaltstack() */
	stack_t				sigstk;

	/*
	 * Current locks bitmap for rtld.
	 */
	int	rtld_bits;

	/* Cleanup handlers Link List */
	struct pthread_cleanup *cleanup;
	char			*fname;	/* Ptr to source file name  */
	int			lineno;	/* Source line number.      */
};
814
/*
 * Critical regions can also be detected by looking at the threads
 * current lock level.  Ensure these macros increment and decrement
 * the lock levels such that locks can not be held with a lock level
 * of 0.
 */
#define	THR_IN_CRITICAL(thrd)					\
	(((thrd)->locklevel > 0) ||				\
	((thrd)->critical_count > 0))

/* Switch out or deliver pending signals, but only outside critical regions. */
#define	THR_YIELD_CHECK(thrd)					\
do {								\
	if (((thrd)->critical_yield != 0) &&			\
	    !(THR_IN_CRITICAL(thrd)))				\
		_thr_sched_switch(thrd);			\
	else if (((thrd)->check_pending != 0) &&		\
	    !(THR_IN_CRITICAL(thrd)))				\
		_thr_sig_check_pending(thrd);			\
} while (0)

/*
 * Claim the next lockuser slot (panics when all MAX_THR_LOCKLEVEL slots
 * are taken); deactivates the previous slot before acquiring.
 */
#define	THR_LOCK_ACQUIRE(thrd, lck)				\
do {								\
	if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) {		\
		THR_DEACTIVATE_LAST_LOCK(thrd);			\
		(thrd)->locklevel++;				\
		_lock_acquire((lck),				\
		    &(thrd)->lockusers[(thrd)->locklevel - 1],	\
		    (thrd)->active_priority);			\
	} else 							\
		PANIC("Exceeded maximum lock level");		\
} while (0)

/*
 * Release the most recently acquired lock; when the last lock is
 * dropped, check whether a deferred yield or signal is pending.
 */
#define	THR_LOCK_RELEASE(thrd, lck)				\
do {								\
	if ((thrd)->locklevel > 0) {				\
		_lock_release((lck),				\
		    &(thrd)->lockusers[(thrd)->locklevel - 1]);	\
		(thrd)->locklevel--;				\
		THR_ACTIVATE_LAST_LOCK(thrd);			\
		if ((thrd)->locklevel == 0)			\
			THR_YIELD_CHECK(thrd);			\
	}							\
} while (0)

#define THR_ACTIVATE_LAST_LOCK(thrd)					\
do {									\
	if ((thrd)->locklevel > 0)					\
		_lockuser_setactive(					\
		    &(thrd)->lockusers[(thrd)->locklevel - 1], 1);	\
} while (0)

#define	THR_DEACTIVATE_LAST_LOCK(thrd)					\
do {									\
	if ((thrd)->locklevel > 0)					\
		_lockuser_setactive(					\
		    &(thrd)->lockusers[(thrd)->locklevel - 1], 0);	\
} while (0)

/*
 * For now, threads will have their own lock separate from their
 * KSE scheduling lock.
 */
#define	THR_LOCK(thr)			THR_LOCK_ACQUIRE(thr, &(thr)->lock)
#define	THR_UNLOCK(thr)			THR_LOCK_RELEASE(thr, &(thr)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)
881
/*
 * Priority queue manipulation macros (using pqe link).  We use
 * the thread's kseg link instead of the kse link because a thread
 * does not (currently) have a statically assigned kse.
 */
#define THR_RUNQ_INSERT_HEAD(thrd)	\
	_pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_INSERT_TAIL(thrd)	\
	_pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_REMOVE(thrd)		\
	_pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define THR_RUNQ_FIRST(thrd)		\
	_pq_first(&(thrd)->kseg->kg_schedq.sq_runq)

/*
 * Macros to insert/remove threads to the all thread list and
 * the gc list.  The THR_FLAGS_IN_* bits guard against double
 * insertion or removal.
 */
#define	THR_LIST_ADD(thrd) do {					\
	if (((thrd)->flags & THR_FLAGS_IN_TDLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
		_thr_hash_add(thrd);				\
		(thrd)->flags |= THR_FLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_LIST_REMOVE(thrd) do {				\
	if (((thrd)->flags & THR_FLAGS_IN_TDLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
		_thr_hash_remove(thrd);				\
		(thrd)->flags &= ~THR_FLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_GCLIST_ADD(thrd) do {				\
	if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
		(thrd)->flags |= THR_FLAGS_IN_GCLIST;		\
		_gc_count++;					\
	}							\
} while (0)
#define	THR_GCLIST_REMOVE(thrd) do {				\
	if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
		(thrd)->flags &= ~THR_FLAGS_IN_GCLIST;		\
		_gc_count--;					\
	}							\
} while (0)

/* Run the garbage collector once enough threads are awaiting cleanup. */
#define GC_NEEDED()	(atomic_load_acq_int(&_gc_count) >= 5)
930
/*
 * Locking the scheduling queue for another thread uses that thread's
 * KSEG lock.  A KSE critical region is entered first and its token
 * stashed in curthr->critical[] for the matching unlock.
 */
#define	THR_SCHED_LOCK(curthr, thr) do {		\
	(curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \
	(curthr)->locklevel++;				\
	KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg);	\
} while (0)

#define	THR_SCHED_UNLOCK(curthr, thr) do {		\
	KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg);	\
	(curthr)->locklevel--;				\
	_kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
} while (0)

/* Take the scheduling lock with the intent to call the scheduler. */
#define	THR_LOCK_SWITCH(curthr) do {			\
	(void)_kse_critical_enter();			\
	KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg);	\
} while (0)
#define	THR_UNLOCK_SWITCH(curthr) do {			\
	KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
} while (0)

/* Recursive critical-region tracking; leaving may trigger a deferred switch. */
#define	THR_CRITICAL_ENTER(thr)		(thr)->critical_count++
#define	THR_CRITICAL_LEAVE(thr)	do {		\
	(thr)->critical_count--;		\
	if (((thr)->critical_yield != 0) &&	\
	    ((thr)->critical_count == 0)) {	\
		(thr)->critical_yield = 0;	\
		_thr_sched_switch(thr);		\
	}					\
} while (0)
965
/*
 * A thread is "active" when it is the current thread of its KSE.
 * The expansion is wrapped in one set of parentheses so the macro
 * composes safely with !, &&, ==, etc.  (The previous expansion was
 * unparenthesized, so `!THR_IS_ACTIVE(t)` bound the `!` to the first
 * operand only and then unconditionally dereferenced t->kse.)
 */
#define	THR_IS_ACTIVE(thrd) \
	(((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)))

#define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

#define	THR_IS_SUSPENDED(thrd) \
	(((thrd)->state == PS_SUSPENDED) || \
	(((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))
#define	THR_IS_EXITING(thrd)	(((thrd)->flags & THR_FLAGS_EXITING) != 0)
975
extern int __isthreaded;

/* Report whether libc's __isthreaded flag has been set. */
static inline int
_kse_isthreaded(void)
{
	return (__isthreaded ? 1 : 0);
}
983
/*
 * Global variables for the pthread kernel.
 * (Defined with initializers in the unit that sets GLOBAL_PTHREAD_PRIVATE;
 * extern declarations everywhere else — see SCLASS above.)
 */

SCLASS void		*_usrstack	SCLASS_PRESET(NULL);
SCLASS struct kse	*_kse_initial	SCLASS_PRESET(NULL);
SCLASS struct pthread	*_thr_initial	SCLASS_PRESET(NULL);

/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread)	_thread_list
    SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));

/* List of threads needing GC: */
SCLASS TAILQ_HEAD(, pthread)	_thread_gc_list
    SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));

SCLASS int	_thr_active_threads  SCLASS_PRESET(1);

/* pthread_atfork() handler list and the mutex protecting it. */
SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
SCLASS pthread_mutex_t		_thr_atfork_mutex;

/* Default thread attributes: */
SCLASS struct pthread_attr _pthread_attr_default
    SCLASS_PRESET({
	SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
	THR_CREATE_RUNNING,	PTHREAD_CREATE_JOINABLE, NULL,
	NULL, NULL, THR_STACK_DEFAULT, /* guardsize */0
    });

/* Default mutex attributes: */
SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
    SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 });

/* Default condition variable attributes: */
SCLASS struct pthread_cond_attr _pthread_condattr_default
    SCLASS_PRESET({COND_TYPE_FAST, 0});

/* Clock resolution in usec.	*/
SCLASS int		_clock_res_usec		SCLASS_PRESET(CLOCK_RES_USEC);

/* Array of signal actions for this process: */
SCLASS struct sigaction	_thread_sigact[_SIG_MAXSIG];

/*
 * Lock for above count of dummy handlers and for the process signal
 * mask and pending signal sets.
 * (NOTE(review): the "count of dummy handlers" this refers to is not
 * visible in this header — confirm against the signal code.)
 */
SCLASS struct lock	_thread_signal_lock;

/* Pending signals and mask for this process: */
SCLASS sigset_t		_thr_proc_sigpending;
SCLASS siginfo_t	_thr_proc_siginfo[_SIG_MAXSIG];

SCLASS pid_t		_thr_pid		SCLASS_PRESET(0);

/* Garbage collector lock. */
SCLASS struct lock	_gc_lock;
SCLASS int		_gc_check		SCLASS_PRESET(0);
SCLASS int		_gc_count		SCLASS_PRESET(0);

SCLASS struct lock	_mutex_static_lock;
SCLASS struct lock	_rwlock_static_lock;
SCLASS struct lock	_keytable_lock;
SCLASS struct lock	_thread_list_lock;
SCLASS int		_thr_guard_default;
SCLASS int		_thr_page_size;
SCLASS pthread_t	_thr_sig_daemon;
SCLASS int		_thr_debug_flags	SCLASS_PRESET(0);

/* Undefine the storage class and preset specifiers: */
#undef  SCLASS
#undef	SCLASS_PRESET
1056
1057
1058/*
1059 * Function prototype definitions.
1060 */
1061__BEGIN_DECLS
1062int	_cond_reinit(pthread_cond_t *);
1063void	_cond_wait_backout(struct pthread *);
1064struct kse *_kse_alloc(struct pthread *, int sys_scope);
1065kse_critical_t _kse_critical_enter(void);
1066void	_kse_critical_leave(kse_critical_t);
1067int	_kse_in_critical(void);
1068void	_kse_free(struct pthread *, struct kse *);
1069void	_kse_init();
1070struct kse_group *_kseg_alloc(struct pthread *);
1071void	_kse_lock_wait(struct lock *, struct lockuser *lu);
1072void	_kse_lock_wakeup(struct lock *, struct lockuser *lu);
1073void	_kse_single_thread(struct pthread *);
1074int	_kse_setthreaded(int);
1075void	_kseg_free(struct kse_group *);
1076int	_mutex_cv_lock(pthread_mutex_t *);
1077int	_mutex_cv_unlock(pthread_mutex_t *);
1078void	_mutex_lock_backout(struct pthread *);
1079void	_mutex_notify_priochange(struct pthread *, struct pthread *, int);
1080int	_mutex_reinit(struct pthread_mutex *);
1081void	_mutex_unlock_private(struct pthread *);
1082void	_libpthread_init(struct pthread *);
1083int	_pq_alloc(struct pq_queue *, int, int);
1084void	_pq_free(struct pq_queue *);
1085int	_pq_init(struct pq_queue *);
1086void	_pq_remove(struct pq_queue *pq, struct pthread *);
1087void	_pq_insert_head(struct pq_queue *pq, struct pthread *);
1088void	_pq_insert_tail(struct pq_queue *pq, struct pthread *);
1089struct pthread *_pq_first(struct pq_queue *pq);
1090void	*_pthread_getspecific(pthread_key_t);
1091int	_pthread_key_create(pthread_key_t *, void (*) (void *));
1092int	_pthread_key_delete(pthread_key_t);
1093int	_pthread_mutex_destroy(pthread_mutex_t *);
1094int	_pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
1095int	_pthread_mutex_lock(pthread_mutex_t *);
1096int	_pthread_mutex_trylock(pthread_mutex_t *);
1097int	_pthread_mutex_unlock(pthread_mutex_t *);
1098int	_pthread_mutexattr_init(pthread_mutexattr_t *);
1099int	_pthread_mutexattr_destroy(pthread_mutexattr_t *);
1100int	_pthread_mutexattr_settype(pthread_mutexattr_t *, int);
1101int	_pthread_once(pthread_once_t *, void (*) (void));
1102int	_pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *);
1103int	_pthread_rwlock_destroy (pthread_rwlock_t *);
1104struct pthread *_pthread_self(void);
1105int	_pthread_setspecific(pthread_key_t, const void *);
1106void	_pthread_yield(void);
1107void	_pthread_cleanup_push(void (*routine) (void *), void *routine_arg);
1108void	_pthread_cleanup_pop(int execute);
1109struct pthread *_thr_alloc(struct pthread *);
1110void	_thr_exit(char *, int, char *);
1111void	_thr_exit_cleanup(void);
1112void	_thr_lock_wait(struct lock *lock, struct lockuser *lu);
1113void	_thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
1114void	_thr_mutex_reinit(pthread_mutex_t *);
1115int	_thr_ref_add(struct pthread *, struct pthread *, int);
1116void	_thr_ref_delete(struct pthread *, struct pthread *);
1117void	_thr_rtld_init(void);
1118void	_thr_rtld_fini(void);
1119int	_thr_schedule_add(struct pthread *, struct pthread *);
1120void	_thr_schedule_remove(struct pthread *, struct pthread *);
1121void	_thr_setrunnable(struct pthread *curthread, struct pthread *thread);
1122struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread);
1123struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *);
1124void	_thr_sig_dispatch(struct kse *, int, siginfo_t *);
1125int	_thr_stack_alloc(struct pthread_attr *);
1126void	_thr_stack_free(struct pthread_attr *);
1127void    _thr_exit_cleanup(void);
1128void	_thr_free(struct pthread *, struct pthread *);
1129void	_thr_gc(struct pthread *);
1130void    _thr_panic_exit(char *, int, char *);
1131void    _thread_cleanupspecific(void);
1132void    _thread_dump_info(void);
1133void	_thread_printf(int, const char *, ...);
1134void	_thr_sched_switch(struct pthread *);
1135void	_thr_sched_switch_unlocked(struct pthread *);
1136void    _thr_set_timeout(const struct timespec *);
1137void	_thr_seterrno(struct pthread *, int);
1138void    _thr_sig_handler(int, siginfo_t *, ucontext_t *);
1139void    _thr_sig_check_pending(struct pthread *);
1140void	_thr_sig_rundown(struct pthread *, ucontext_t *,
1141	    struct pthread_sigframe *);
1142void	_thr_sig_send(struct pthread *pthread, int sig);
1143void	_thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
1144void	_thr_spinlock_init(void);
1145void	_thr_cancel_enter(struct pthread *);
1146void	_thr_cancel_leave(struct pthread *, int);
1147int	_thr_setconcurrency(int new_level);
1148int	_thr_setmaxconcurrency(void);
1149void	_thr_critical_enter(struct pthread *);
1150void	_thr_critical_leave(struct pthread *);
1151int	_thr_start_sig_daemon(void);
1152int	_thr_getprocsig(int sig, siginfo_t *siginfo);
1153int	_thr_getprocsig_unlocked(int sig, siginfo_t *siginfo);
1154void	_thr_signal_init(void);
1155void	_thr_signal_deinit(void);
1156void	_thr_hash_add(struct pthread *);
1157void	_thr_hash_remove(struct pthread *);
1158struct pthread *_thr_hash_find(struct pthread *);
1159void	_thr_finish_cancellation(void *arg);
1160int	_thr_sigonstack(void *sp);
1161
1162/*
1163 * Aliases for _pthread functions. Should be called instead of
1164 * originals if PLT replocation is unwanted at runtme.
1165 */
1166int	_thr_cond_broadcast(pthread_cond_t *);
1167int	_thr_cond_signal(pthread_cond_t *);
1168int	_thr_cond_wait(pthread_cond_t *, pthread_mutex_t *);
1169int	_thr_mutex_lock(pthread_mutex_t *);
1170int	_thr_mutex_unlock(pthread_mutex_t *);
1171int	_thr_rwlock_rdlock (pthread_rwlock_t *);
1172int	_thr_rwlock_wrlock (pthread_rwlock_t *);
1173int	_thr_rwlock_unlock (pthread_rwlock_t *);
1174
1175/* #include <sys/aio.h> */
1176#ifdef _SYS_AIO_H_
1177int	__sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
1178#endif
1179
1180/* #include <fcntl.h> */
1181#ifdef  _SYS_FCNTL_H_
1182int     __sys_fcntl(int, int, ...);
1183int     __sys_open(const char *, int, ...);
1184#endif
1185
1186/* #include <sys/ioctl.h> */
1187#ifdef _SYS_IOCTL_H_
1188int	__sys_ioctl(int, unsigned long, ...);
1189#endif
1190
/* #include <sched.h> */
1192#ifdef	_SCHED_H_
1193int	__sys_sched_yield(void);
1194#endif
1195
1196/* #include <signal.h> */
1197#ifdef _SIGNAL_H_
1198int	__sys_kill(pid_t, int);
1199int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
1200int     __sys_sigpending(sigset_t *);
1201int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
1202int     __sys_sigsuspend(const sigset_t *);
1203int     __sys_sigreturn(ucontext_t *);
1204int     __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
1205#endif
1206
1207/* #include <sys/socket.h> */
1208#ifdef _SYS_SOCKET_H_
1209int	__sys_accept(int, struct sockaddr *, socklen_t *);
1210int	__sys_connect(int, const struct sockaddr *, socklen_t);
1211int	__sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
1212	    off_t *, int);
1213#endif
1214
1215/* #include <sys/uio.h> */
1216#ifdef  _SYS_UIO_H_
1217ssize_t __sys_readv(int, const struct iovec *, int);
1218ssize_t __sys_writev(int, const struct iovec *, int);
1219#endif
1220
1221/* #include <time.h> */
1222#ifdef	_TIME_H_
1223int	__sys_nanosleep(const struct timespec *, struct timespec *);
1224#endif
1225
1226/* #include <unistd.h> */
1227#ifdef  _UNISTD_H_
1228int     __sys_close(int);
1229int     __sys_execve(const char *, char * const *, char * const *);
1230int	__sys_fork(void);
1231int	__sys_fsync(int);
1232pid_t	__sys_getpid(void);
1233int     __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
1234ssize_t __sys_read(int, void *, size_t);
1235ssize_t __sys_write(int, const void *, size_t);
1236void	__sys_exit(int);
1237int	__sys_sigwait(const sigset_t *, int *);
1238int	__sys_sigtimedwait(sigset_t *, siginfo_t *, struct timespec *);
1239#endif
1240
1241/* #include <poll.h> */
1242#ifdef _SYS_POLL_H_
1243int 	__sys_poll(struct pollfd *, unsigned, int);
1244#endif
1245
1246/* #include <sys/mman.h> */
1247#ifdef _SYS_MMAN_H_
1248int	__sys_msync(void *, size_t, int);
1249#endif
1250
1251#endif  /* !_THR_PRIVATE_H */
1252