thr_private.h (80021) → thr_private.h (81750)
1/*
2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by John Birrell.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * Private thread definitions for the uthread kernel.
33 *
34 * $FreeBSD: head/lib/libkse/thread/thr_private.h 80021 2001-07-20 04:23:11Z jasone $
34 * $FreeBSD: head/lib/libkse/thread/thr_private.h 81750 2001-08-16 06:31:32Z jasone $
35 */
36
37#ifndef _PTHREAD_PRIVATE_H
38#define _PTHREAD_PRIVATE_H
39
40/*
41 * Evaluate the storage class specifier.
42 */
43#ifdef GLOBAL_PTHREAD_PRIVATE
44#define SCLASS
45#else
46#define SCLASS extern
47#endif
48
49/*
50 * Include files.
51 */
52#include <setjmp.h>
53#include <signal.h>
54#include <stdio.h>
55#include <sys/queue.h>
56#include <sys/types.h>
57#include <sys/time.h>
58#include <sys/cdefs.h>
59#include <sched.h>
60#include <spinlock.h>
61#include <pthread_np.h>
62
63/*
64 * Define machine dependent macros to get and set the stack pointer
65 * from the supported contexts. Also define a macro to set the return
66 * address in a jmp_buf context.
67 *
68 * XXX - These need to be moved into architecture dependent support files.
69 */
70#if defined(__i386__)
71#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[2]))
72#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[2]))
73#define GET_STACK_UC(ucp) ((unsigned long)((ucp)->uc_mcontext.mc_esp))
74#define SET_STACK_JB(jb, stk) (jb)[0]._jb[2] = (int)(stk)
75#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[2] = (int)(stk)
76#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_esp = (int)(stk)
77#define FP_SAVE_UC(ucp) do { \
78 char *fdata; \
79 fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
80 __asm__("fnsave %0": :"m"(*fdata)); \
81} while (0)
82#define FP_RESTORE_UC(ucp) do { \
83 char *fdata; \
84 fdata = (char *) (ucp)->uc_mcontext.mc_fpregs; \
85 __asm__("frstor %0": :"m"(*fdata)); \
86} while (0)
87#define SET_RETURN_ADDR_JB(jb, ra) (jb)[0]._jb[0] = (int)(ra)
88#elif defined(__alpha__)
89#include <machine/reg.h>
90#define GET_STACK_JB(jb) ((unsigned long)((jb)[0]._jb[R_SP + 4]))
91#define GET_STACK_SJB(sjb) ((unsigned long)((sjb)[0]._sjb[R_SP + 4]))
92#define GET_STACK_UC(ucp) ((ucp)->uc_mcontext.mc_regs[R_SP])
93#define SET_STACK_JB(jb, stk) (jb)[0]._jb[R_SP + 4] = (long)(stk)
94#define SET_STACK_SJB(sjb, stk) (sjb)[0]._sjb[R_SP + 4] = (long)(stk)
95#define SET_STACK_UC(ucp, stk) (ucp)->uc_mcontext.mc_regs[R_SP] = (unsigned long)(stk)
96#define FP_SAVE_UC(ucp)
97#define FP_RESTORE_UC(ucp)
98#define SET_RETURN_ADDR_JB(jb, ra) do { \
99 (jb)[0]._jb[2] = (unsigned long)(ra) + 8UL; \
100 (jb)[0]._jb[R_RA + 4] = 0; \
101 (jb)[0]._jb[R_T12 + 4] = (long)(ra); \
102} while (0)
103#else
104#error "Don't recognize this architecture!"
105#endif
106
107/*
108 * Kernel fatal error handler macro.
109 */
110#define PANIC(string) _thread_exit(__FILE__,__LINE__,string)
111
112
113/* Output debug messages like this: */
114#define stdout_debug(args...) do { \
115 char buf[128]; \
116 snprintf(buf, sizeof(buf), ##args); \
117 __sys_write(1, buf, strlen(buf)); \
118} while (0)
119#define stderr_debug(args...) do { \
120 char buf[128]; \
121 snprintf(buf, sizeof(buf), ##args); \
122 __sys_write(2, buf, strlen(buf)); \
123} while (0)
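Both macros format into a fixed 128-byte buffer and hand it straight to __sys_write(), so debug output never goes through stdio (whose FILE locking depends on machinery defined later in this header). A minimal usage sketch, not part of the header itself:

/* Illustrative only: any printf-style arguments work, but output past the
 * 128-byte buffer is silently truncated. */
stderr_debug("checkpoint at %s:%d\n", __FILE__, __LINE__);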
124
125
126
127/*
128 * Priority queue manipulation macros (using pqe link):
129 */
130#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd)
131#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd)
132#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd)
133#define PTHREAD_PRIOQ_FIRST() _pq_first(&_readyq)
134
135/*
136 * Waiting queue manipulation macros (using pqe link):
137 */
138#define PTHREAD_WAITQ_REMOVE(thrd) _waitq_remove(thrd)
139#define PTHREAD_WAITQ_INSERT(thrd) _waitq_insert(thrd)
140
141#if defined(_PTHREADS_INVARIANTS)
142#define PTHREAD_WAITQ_CLEARACTIVE() _waitq_clearactive()
143#define PTHREAD_WAITQ_SETACTIVE() _waitq_setactive()
144#else
145#define PTHREAD_WAITQ_CLEARACTIVE()
146#define PTHREAD_WAITQ_SETACTIVE()
147#endif
148
149/*
150 * Work queue manipulation macros (using qe link):
151 */
152#define PTHREAD_WORKQ_INSERT(thrd) do { \
153 TAILQ_INSERT_TAIL(&_workq,thrd,qe); \
154 (thrd)->flags |= PTHREAD_FLAGS_IN_WORKQ; \
155} while (0)
156#define PTHREAD_WORKQ_REMOVE(thrd) do { \
157 TAILQ_REMOVE(&_workq,thrd,qe); \
158 (thrd)->flags &= ~PTHREAD_FLAGS_IN_WORKQ; \
159} while (0)
160
161
162/*
163 * State change macro without scheduling queue change:
164 */
165#define PTHREAD_SET_STATE(thrd, newstate) do { \
166 (thrd)->state = newstate; \
167 (thrd)->fname = __FILE__; \
168 (thrd)->lineno = __LINE__; \
169} while (0)
170
171/*
172 * State change macro with scheduling queue change - This must be
173 * called with preemption deferred (see thread_kern_sched_[un]defer).
174 */
175#if defined(_PTHREADS_INVARIANTS)
176#include <assert.h>
177#define PTHREAD_ASSERT(cond, msg) do { \
178 if (!(cond)) \
179 PANIC(msg); \
180} while (0)
181#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd) \
182 PTHREAD_ASSERT((((thrd)->flags & PTHREAD_FLAGS_IN_SYNCQ) == 0), \
183 "Illegal call from signal handler");
184#define PTHREAD_NEW_STATE(thrd, newstate) do { \
185 if (_thread_kern_new_state != 0) \
186 PANIC("Recursive PTHREAD_NEW_STATE"); \
187 _thread_kern_new_state = 1; \
188 if ((thrd)->state != newstate) { \
189 if ((thrd)->state == PS_RUNNING) { \
190 PTHREAD_PRIOQ_REMOVE(thrd); \
191 PTHREAD_WAITQ_INSERT(thrd); \
192 } else if (newstate == PS_RUNNING) { \
193 PTHREAD_WAITQ_REMOVE(thrd); \
194 PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
195 } \
196 } \
197 _thread_kern_new_state = 0; \
198 PTHREAD_SET_STATE(thrd, newstate); \
199} while (0)
200#else
201#define PTHREAD_ASSERT(cond, msg)
202#define PTHREAD_ASSERT_NOT_IN_SYNCQ(thrd)
203#define PTHREAD_NEW_STATE(thrd, newstate) do { \
204 if ((thrd)->state != newstate) { \
205 if ((thrd)->state == PS_RUNNING) { \
206 PTHREAD_PRIOQ_REMOVE(thrd); \
207 PTHREAD_WAITQ_INSERT(thrd); \
208 } else if (newstate == PS_RUNNING) { \
209 PTHREAD_WAITQ_REMOVE(thrd); \
210 PTHREAD_PRIOQ_INSERT_TAIL(thrd); \
211 } \
212 } \
213 PTHREAD_SET_STATE(thrd, newstate); \
214} while (0)
215#endif
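A sketch of how a queue-affecting state change is expected to be bracketed, per the comment above; this is assumed caller-side code (using the signal-deferral entry points declared later in this header), not text from the header:

/* Sketch: defer the scheduling signal around the queue manipulation;
 * `pthread' is an assumed struct pthread * local. */
_thread_kern_sig_defer();
PTHREAD_NEW_STATE(pthread, PS_SLEEP_WAIT);
_thread_kern_sig_undefer();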
216
217/*
218 * Define the signals to be used for scheduling.
219 */
220#if defined(_PTHREADS_COMPAT_SCHED)
221#define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL
222#define _SCHED_SIGNAL SIGVTALRM
223#else
224#define _ITIMER_SCHED_TIMER ITIMER_PROF
225#define _SCHED_SIGNAL SIGPROF
226#endif
227
228/*
229 * Priority queues.
230 *
231 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
232 */
233typedef struct pq_list {
234 TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */
235 TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */
236 int pl_prio; /* the priority of this list */
237 int pl_queued; /* is this in the priority queue */
238} pq_list_t;
239
240typedef struct pq_queue {
241 TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */
242 pq_list_t *pq_lists; /* array of all priority lists */
243 int pq_size; /* number of priority lists */
244} pq_queue_t;
245
246
247/*
248 * TailQ initialization values.
249 */
250#define TAILQ_INITIALIZER { NULL, NULL }
251
252/*
253 * Mutex definitions.
254 */
255union pthread_mutex_data {
256 void *m_ptr;
257 int m_count;
258};
259
260struct pthread_mutex {
261 enum pthread_mutextype m_type;
262 int m_protocol;
263 TAILQ_HEAD(mutex_head, pthread) m_queue;
264 struct pthread *m_owner;
265 union pthread_mutex_data m_data;
266 long m_flags;
267 int m_refcount;
268
269 /*
 270	 * Used for priority inheritance and protection.
 271	 *
 272	 * m_prio - For priority inheritance, the highest active
 273	 * priority (threads locking the mutex inherit
 274	 * this priority). For priority protection, the
 275	 * ceiling priority of this mutex.
 276	 * m_saved_prio - mutex owner's inherited priority before
277 * taking the mutex, restored when the owner
278 * unlocks the mutex.
279 */
280 int m_prio;
281 int m_saved_prio;
282
283 /*
284 * Link for list of all mutexes a thread currently owns.
285 */
286 TAILQ_ENTRY(pthread_mutex) m_qe;
287
288 /*
289 * Lock for accesses to this structure.
290 */
291 spinlock_t lock;
292};
293
294/*
295 * Flags for mutexes.
296 */
297#define MUTEX_FLAGS_PRIVATE 0x01
298#define MUTEX_FLAGS_INITED 0x02
299#define MUTEX_FLAGS_BUSY 0x04
300
301/*
302 * Static mutex initialization values.
303 */
304#define PTHREAD_MUTEX_STATIC_INITIALIZER \
305 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
306 NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER, \
307 _SPINLOCK_INITIALIZER }
308
309struct pthread_mutex_attr {
310 enum pthread_mutextype m_type;
311 int m_protocol;
312 int m_ceiling;
313 long m_flags;
314};
315
316#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
317 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
318
319/*
320 * Condition variable definitions.
321 */
322enum pthread_cond_type {
323 COND_TYPE_FAST,
324 COND_TYPE_MAX
325};
326
327struct pthread_cond {
328 enum pthread_cond_type c_type;
329 TAILQ_HEAD(cond_head, pthread) c_queue;
330 pthread_mutex_t c_mutex;
331 void *c_data;
332 long c_flags;
333 int c_seqno;
334
335 /*
336 * Lock for accesses to this structure.
337 */
338 spinlock_t lock;
339};
340
341struct pthread_cond_attr {
342 enum pthread_cond_type c_type;
343 long c_flags;
344};
345
346/*
347 * Flags for condition variables.
348 */
349#define COND_FLAGS_PRIVATE 0x01
350#define COND_FLAGS_INITED 0x02
351#define COND_FLAGS_BUSY 0x04
352
353/*
354 * Static cond initialization values.
355 */
356#define PTHREAD_COND_STATIC_INITIALIZER \
357 { COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
358 0, 0, _SPINLOCK_INITIALIZER }
359
360/*
361 * Semaphore definitions.
362 */
363struct sem {
364#define SEM_MAGIC ((u_int32_t) 0x09fa4012)
365 u_int32_t magic;
366 pthread_mutex_t lock;
367 pthread_cond_t gtzero;
368 u_int32_t count;
369 u_int32_t nwaiters;
370};
371
372/*
373 * Cleanup definitions.
374 */
375struct pthread_cleanup {
376 struct pthread_cleanup *next;
377 void (*routine) ();
378 void *routine_arg;
379};
380
381struct pthread_attr {
382 int sched_policy;
383 int sched_inherit;
384 int sched_interval;
385 int prio;
386 int suspend;
387 int flags;
388 void *arg_attr;
389 void (*cleanup_attr) ();
390 void *stackaddr_attr;
391 size_t stacksize_attr;
392 size_t guardsize_attr;
393};
394
395/*
396 * Thread creation state attributes.
397 */
398#define PTHREAD_CREATE_RUNNING 0
399#define PTHREAD_CREATE_SUSPENDED 1
400
401/*
402 * Additional state for a thread suspended with pthread_suspend_np().
403 */
404enum pthread_susp {
405 SUSP_NO, /* Not suspended. */
406 SUSP_YES, /* Suspended. */
407 SUSP_JOIN, /* Suspended, joining. */
408 SUSP_NOWAIT, /* Suspended, was in a mutex or condition queue. */
409 SUSP_MUTEX_WAIT,/* Suspended, still in a mutex queue. */
410 SUSP_COND_WAIT /* Suspended, still in a condition queue. */
411};
412
413/*
414 * Miscellaneous definitions.
415 */
416#define PTHREAD_STACK_DEFAULT 65536
417/*
418 * Size of default red zone at the end of each stack. In actuality, this "red
419 * zone" is merely an unmapped region, except in the case of the initial stack.
420 * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK
421 * region, an unmapped gap between thread stacks achieves the same effect as
422 * explicitly mapped red zones.
423 */
424#define PTHREAD_GUARD_DEFAULT PAGE_SIZE
425
426/*
427 * Maximum size of initial thread's stack. This perhaps deserves to be larger
428 * than the stacks of other threads, since many applications are likely to run
429 * almost entirely on this stack.
430 */
431#define PTHREAD_STACK_INITIAL 0x100000
432
433/* Size of the scheduler stack: */
434#define SCHED_STACK_SIZE PAGE_SIZE
435
436/*
437 * Define the different priority ranges. All applications have thread
438 * priorities constrained within 0-31. The threads library raises the
439 * priority when delivering signals in order to ensure that signal
440 * delivery happens (from the POSIX spec) "as soon as possible".
441 * In the future, the threads library will also be able to map specific
442 * threads into real-time (cooperating) processes or kernel threads.
443 * The RT and SIGNAL priorities will be used internally and added to
444 * thread base priorities so that the scheduling queue can handle both
445 * normal and RT priority threads with and without signal handling.
446 *
447 * The approach taken is that, within each class, signal delivery
448 * always has priority over thread execution.
449 */
450#define PTHREAD_DEFAULT_PRIORITY 15
451#define PTHREAD_MIN_PRIORITY 0
452#define PTHREAD_MAX_PRIORITY 31 /* 0x1F */
453#define PTHREAD_SIGNAL_PRIORITY 32 /* 0x20 */
454#define PTHREAD_RT_PRIORITY 64 /* 0x40 */
455#define PTHREAD_FIRST_PRIORITY PTHREAD_MIN_PRIORITY
456#define PTHREAD_LAST_PRIORITY \
457 (PTHREAD_MAX_PRIORITY + PTHREAD_SIGNAL_PRIORITY + PTHREAD_RT_PRIORITY)
458#define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
459
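As a worked example of the arithmetic above (a sketch, not part of the header): a default-priority thread chosen to run a signal handler is queued at its base priority plus PTHREAD_SIGNAL_PRIORITY, and the base priority is recovered by masking:

/* Sketch: priority arithmetic for a default-priority thread handling a signal. */
int base = PTHREAD_DEFAULT_PRIORITY;            /* 15 */
int queued = base + PTHREAD_SIGNAL_PRIORITY;    /* 15 + 32 = 47 */
int recovered = PTHREAD_BASE_PRIORITY(queued);  /* 47 & 0x1f = 15 */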
460/*
461 * Clock resolution in microseconds.
462 */
463#define CLOCK_RES_USEC 10000
464#define CLOCK_RES_USEC_MIN 1000
465
466/*
467 * Time slice period in microseconds.
468 */
469#define TIMESLICE_USEC 20000
470
471/*
472 * Define a thread-safe macro to get the current time of day
473 * which is updated at regular intervals by the scheduling signal
474 * handler.
475 */
476#define GET_CURRENT_TOD(tv) \
477 do { \
478 tv.tv_sec = _sched_tod.tv_sec; \
479 tv.tv_usec = _sched_tod.tv_usec; \
480 } while (tv.tv_sec != _sched_tod.tv_sec)
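The retry loop guards against the scheduling signal handler updating _sched_tod between the two field copies: if the seconds field changes mid-read, both fields are read again. A usage sketch (assumed caller-side code):

/* Sketch: take a consistent snapshot of the scheduler's idea of "now". */
struct timeval now;
GET_CURRENT_TOD(now);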
481
482
483struct pthread_key {
484 spinlock_t lock;
485 volatile int allocated;
486 volatile int count;
487 void (*destructor) ();
488};
489
490struct pthread_rwlockattr {
491 int pshared;
492};
493
494struct pthread_rwlock {
495 pthread_mutex_t lock; /* monitor lock */
 496	int		state;		/* 0 = idle, >0 = # of readers, -1 = writer */
497 pthread_cond_t read_signal;
498 pthread_cond_t write_signal;
499 int blocked_writers;
500};
501
502/*
503 * Thread states.
504 */
505enum pthread_state {
506 PS_RUNNING,
507 PS_SIGTHREAD,
508 PS_MUTEX_WAIT,
509 PS_COND_WAIT,
510 PS_FDLR_WAIT,
511 PS_FDLW_WAIT,
512 PS_FDR_WAIT,
513 PS_FDW_WAIT,
514 PS_FILE_WAIT,
515 PS_POLL_WAIT,
516 PS_SELECT_WAIT,
517 PS_SLEEP_WAIT,
518 PS_WAIT_WAIT,
519 PS_SIGSUSPEND,
520 PS_SIGWAIT,
521 PS_SPINBLOCK,
522 PS_JOIN,
523 PS_SUSPENDED,
524 PS_DEAD,
525 PS_DEADLOCK,
526 PS_STATE_MAX
527};
528
529
530/*
531 * File descriptor locking definitions.
532 */
533#define FD_READ 0x1
534#define FD_WRITE 0x2
535#define FD_RDWR (FD_READ | FD_WRITE)
536
537/*
538 * File descriptor table structure.
539 */
540struct fd_table_entry {
541 /*
542 * Lock for accesses to this file descriptor table
543 * entry. This is passed to _spinlock() to provide atomic
544 * access to this structure. It does *not* represent the
545 * state of the lock on the file descriptor.
546 */
547 spinlock_t lock;
548 TAILQ_HEAD(, pthread) r_queue; /* Read queue. */
549 TAILQ_HEAD(, pthread) w_queue; /* Write queue. */
550 struct pthread *r_owner; /* Ptr to thread owning read lock. */
551 struct pthread *w_owner; /* Ptr to thread owning write lock. */
552 char *r_fname; /* Ptr to read lock source file name */
553 int r_lineno; /* Read lock source line number. */
554 char *w_fname; /* Ptr to write lock source file name */
555 int w_lineno; /* Write lock source line number. */
556 int r_lockcount; /* Count for FILE read locks. */
557 int w_lockcount; /* Count for FILE write locks. */
558 int flags; /* Flags used in open. */
559};
560
561struct pthread_poll_data {
562 int nfds;
563 struct pollfd *fds;
564};
565
566union pthread_wait_data {
567 pthread_mutex_t mutex;
568 pthread_cond_t cond;
569 const sigset_t *sigwait; /* Waiting on a signal in sigwait */
570 struct {
571 short fd; /* Used when thread waiting on fd */
572 short branch; /* Line number, for debugging. */
573 char *fname; /* Source file name for debugging.*/
574 } fd;
575 FILE *fp;
576 struct pthread_poll_data *poll_data;
577 spinlock_t *spinlock;
578 struct pthread *thread;
578};
579
580/*
581 * Define a continuation routine that can be used to perform a
582 * transfer of control:
583 */
584typedef void (*thread_continuation_t) (void *);
585
586struct pthread_signal_frame;
587
588struct pthread_state_data {
589 struct pthread_signal_frame *psd_curframe;
590 sigset_t psd_sigmask;
591 struct timespec psd_wakeup_time;
592 union pthread_wait_data psd_wait_data;
593 enum pthread_state psd_state;
594 int psd_flags;
595 int psd_interrupted;
596 int psd_longjmp_val;
597 int psd_sigmask_seqno;
598 int psd_signo;
599 int psd_sig_defer_count;
600 /* XXX - What about thread->timeout and/or thread->error? */
601};
602
603
604/*
605 * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(),
606 * but they may also be sigjmp_buf and ucontext_t. When a thread is
 607 * interrupted by a signal, its context is saved as a ucontext_t. An
608 * application is also free to use [_]longjmp()/[_]siglongjmp() to jump
609 * between contexts within the same thread. Future support will also
610 * include setcontext()/getcontext().
611 *
612 * Define an enumerated type that can identify the 4 different context
613 * types.
614 */
615typedef enum {
616 CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */
617 CTX_JB, /* context is jmp_buf (with saved sigset) */
618 CTX_SJB, /* context is sigjmp_buf (with saved sigset) */
619 CTX_UC /* context is ucontext_t (with saved sigset) */
620} thread_context_t;
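A hedged sketch of how resuming a saved context might dispatch on ctxtype, using the longjmp entry points and __sys_sigreturn() declared near the end of this header; an illustration under that assumption, not the library's actual scheduler code:

/* Sketch only: resume `thread' (an assumed struct pthread *) from whichever
 * context type it saved. */
switch (thread->ctxtype) {
case CTX_JB_NOSIG:
	___longjmp(thread->ctx.jb, thread->longjmp_val);
	break;
case CTX_JB:
	__longjmp(thread->ctx.jb, thread->longjmp_val);
	break;
case CTX_SJB:
	__siglongjmp(thread->ctx.sigjb, thread->longjmp_val);
	break;
case CTX_UC:
	__sys_sigreturn(&thread->ctx.uc);
	break;
}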
621
622/*
623 * There are 2 basic contexts that a frame may contain at any
624 * one time:
625 *
626 * o ctx - The context that the thread should return to after normal
627 * completion of the signal handler.
628 * o sig_jb - The context just before the signal handler is invoked.
629 * Attempts at abnormal returns from user supplied signal handlers
630 * will return back to the signal context to perform any necessary
631 * cleanup.
632 */
633struct pthread_signal_frame {
634 /*
 635	 * This stores the thread's state before the signal.
636 */
637 struct pthread_state_data saved_state;
638
639 /*
 640	 * Thread's return context; ctxtype identifies the type of context.
 641	 * For signal frame 0, these point to the context storage area
 642	 * within the pthread structure. When handling signals (frame > 0),
 643	 * these point to a context storage area that is allocated off the
 644	 * thread's stack.
645 */
646 union {
647 jmp_buf jb;
648 sigjmp_buf sigjb;
649 ucontext_t uc;
650 } ctx;
651 thread_context_t ctxtype;
652 int longjmp_val;
653 int signo; /* signal, arg 1 to sighandler */
654 int sig_has_args; /* use signal args if true */
655 ucontext_t uc;
656 siginfo_t siginfo;
657};
658
659/*
660 * Thread structure.
661 */
662struct pthread {
663 /*
664 * Magic value to help recognize a valid thread structure
665 * from an invalid one:
666 */
667#define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115)
668 u_int32_t magic;
669 char *name;
670 u_int64_t uniqueid; /* for gdb */
671
672 /*
673 * Lock for accesses to this thread structure.
674 */
675 spinlock_t lock;
676
677 /* Queue entry for list of all threads: */
678 TAILQ_ENTRY(pthread) tle;
679
680 /* Queue entry for list of dead threads: */
681 TAILQ_ENTRY(pthread) dle;
682
683 /*
684 * Thread start routine, argument, stack pointer and thread
685 * attributes.
686 */
687 void *(*start_routine)(void *);
688 void *arg;
689 void *stack;
690 struct pthread_attr attr;
691
692 /*
 693	 * Thread's return context; ctxtype identifies the type of context.
694 */
695 union {
696 jmp_buf jb;
697 sigjmp_buf sigjb;
698 ucontext_t uc;
699 } ctx;
700 thread_context_t ctxtype;
701 int longjmp_val;
702
703 /*
704 * Used for tracking delivery of signal handlers.
705 */
706 struct pthread_signal_frame *curframe;
707
708 /*
709 * Cancelability flags - the lower 2 bits are used by cancel
710 * definitions in pthread.h
711 */
712#define PTHREAD_AT_CANCEL_POINT 0x0004
713#define PTHREAD_CANCELLING 0x0008
714#define PTHREAD_CANCEL_NEEDED 0x0010
715 int cancelflags;
716
717 enum pthread_susp suspended;
718
719 thread_continuation_t continuation;
720
721 /*
722 * Current signal mask and pending signals.
723 */
724 sigset_t sigmask;
725 sigset_t sigpend;
726 int sigmask_seqno;
727 int check_pending;
728
729 /* Thread state: */
730 enum pthread_state state;
731
732 /* Scheduling clock when this thread was last made active. */
733 long last_active;
734
735 /* Scheduling clock when this thread was last made inactive. */
736 long last_inactive;
737
738 /*
739 * Number of microseconds accumulated by this thread when
740 * time slicing is active.
741 */
742 long slice_usec;
743
744 /*
745 * Time to wake up thread. This is used for sleeping threads and
746 * for any operation which may time out (such as select).
747 */
748 struct timespec wakeup_time;
749
750 /* TRUE if operation has timed out. */
751 int timeout;
752
753 /*
754 * Error variable used instead of errno. The function __error()
755 * returns a pointer to this.
756 */
757 int error;
758
759 /* Pointer to a thread that is waiting to join (NULL if no joiner). */
760 struct pthread *joiner;
761
762 /*
763 * The current thread can belong to only one scheduling queue at
764 * a time (ready or waiting queue). It can also belong to:
765 *
766 * o A queue of threads waiting for a mutex
767 * o A queue of threads waiting for a condition variable
768 * o A queue of threads waiting for a file descriptor lock
769 * o A queue of threads needing work done by the kernel thread
770 * (waiting for a spinlock or file I/O)
771 *
772 * A thread can also be joining a thread (the joiner field above).
773 *
774 * It must not be possible for a thread to belong to any of the
775 * above queues while it is handling a signal. Signal handlers
776 * may longjmp back to previous stack frames circumventing normal
777 * control flow. This could corrupt queue integrity if the thread
778 * retains membership in the queue. Therefore, if a thread is a
779 * member of one of these queues when a signal handler is invoked,
780 * it must remove itself from the queue before calling the signal
781 * handler and reinsert itself after normal return of the handler.
782 *
783 * Use pqe for the scheduling queue link (both ready and waiting),
784 * sqe for synchronization (mutex and condition variable) queue
785 * links, and qe for all other links.
786 */
787 TAILQ_ENTRY(pthread) pqe; /* priority queue link */
788 TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
789 TAILQ_ENTRY(pthread) qe; /* all other queues link */
790
791 /* Wait data. */
792 union pthread_wait_data data;
793
794 /*
795 * Allocated for converting select into poll.
796 */
797 struct pthread_poll_data poll_data;
798
799 /*
800 * Set to TRUE if a blocking operation was
801 * interrupted by a signal:
802 */
803 int interrupted;
804
805 /* Signal number when in state PS_SIGWAIT: */
806 int signo;
807
808 /*
809 * Set to non-zero when this thread has deferred signals.
810 * We allow for recursive deferral.
811 */
812 int sig_defer_count;
813
814 /*
815 * Set to TRUE if this thread should yield after undeferring
816 * signals.
817 */
818 int yield_on_sig_undefer;
819
820 /* Miscellaneous flags; only set with signals deferred. */
821 int flags;
822#define PTHREAD_FLAGS_PRIVATE 0x0001
823#define PTHREAD_EXITING 0x0002
824#define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */
825#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
826#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
827#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
828#define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */
829#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
830#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
831#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */
832#define PTHREAD_FLAGS_IN_SYNCQ \
833 (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ)
834
835 /*
 836	 * Base priority is the user-settable and retrievable priority
837 * of the thread. It is only affected by explicit calls to
838 * set thread priority and upon thread creation via a thread
839 * attribute or default priority.
840 */
841 char base_priority;
842
843 /*
844 * Inherited priority is the priority a thread inherits by
 845	 * taking a priority inheritance or protection mutex. It
846 * is not affected by base priority changes. Inherited
847 * priority defaults to and remains 0 until a mutex is taken
848 * that is being waited on by any other thread whose priority
849 * is non-zero.
850 */
851 char inherited_priority;
852
853 /*
 854	 * Active priority is always the maximum of the thread's base
855 * priority and inherited priority. When there is a change
856 * in either the base or inherited priority, the active
857 * priority must be recalculated.
858 */
859 char active_priority;
860
861 /* Number of priority ceiling or protection mutexes owned. */
862 int priority_mutex_count;
863
864 /*
865 * Queue of currently owned mutexes.
866 */
867 TAILQ_HEAD(, pthread_mutex) mutexq;
868
869 void *ret;
870 const void **specific_data;
871 int specific_data_count;
872
 873	/* Cleanup handler linked list. */
874 struct pthread_cleanup *cleanup;
875 char *fname; /* Ptr to source file name */
876 int lineno; /* Source line number. */
877};
878
879/*
880 * Global variables for the uthread kernel.
881 */
882
883/* Kernel thread structure used when there are no running threads: */
884SCLASS struct pthread _thread_kern_thread;
885
886/* Ptr to the thread structure for the running thread: */
887SCLASS struct pthread * volatile _thread_run
888#ifdef GLOBAL_PTHREAD_PRIVATE
889= &_thread_kern_thread;
890#else
891;
892#endif
893
894/* Ptr to the thread structure for the last user thread to run: */
895SCLASS struct pthread * volatile _last_user_thread
896#ifdef GLOBAL_PTHREAD_PRIVATE
897= &_thread_kern_thread;
898#else
899;
900#endif
901
902/*
903 * Ptr to the thread running in single-threaded mode or NULL if
904 * running multi-threaded (default POSIX behaviour).
905 */
906SCLASS struct pthread * volatile _thread_single
907#ifdef GLOBAL_PTHREAD_PRIVATE
908= NULL;
909#else
910;
911#endif
912
913/* List of all threads: */
914SCLASS TAILQ_HEAD(, pthread) _thread_list
915#ifdef GLOBAL_PTHREAD_PRIVATE
916= TAILQ_HEAD_INITIALIZER(_thread_list);
917#else
918;
919#endif
920
921/*
922 * Array of kernel pipe file descriptors that are used to ensure that
923 * no signals are missed in calls to _select.
924 */
925SCLASS int _thread_kern_pipe[2]
926#ifdef GLOBAL_PTHREAD_PRIVATE
927= {
928 -1,
929 -1
930};
931#else
932;
933#endif
934SCLASS int volatile _queue_signals
935#ifdef GLOBAL_PTHREAD_PRIVATE
936= 0;
937#else
938;
939#endif
940SCLASS int _thread_kern_in_sched
941#ifdef GLOBAL_PTHREAD_PRIVATE
942= 0;
943#else
944;
945#endif
946
947SCLASS int _sig_in_handler
948#ifdef GLOBAL_PTHREAD_PRIVATE
949= 0;
950#else
951;
952#endif
953
954/* Time of day at last scheduling timer signal: */
955SCLASS struct timeval volatile _sched_tod
956#ifdef GLOBAL_PTHREAD_PRIVATE
957= { 0, 0 };
958#else
959;
960#endif
961
962/*
963 * Current scheduling timer ticks; used as resource usage.
964 */
965SCLASS unsigned int volatile _sched_ticks
966#ifdef GLOBAL_PTHREAD_PRIVATE
967= 0;
968#else
969;
970#endif
971
972/* Dead threads: */
973SCLASS TAILQ_HEAD(, pthread) _dead_list
974#ifdef GLOBAL_PTHREAD_PRIVATE
975= TAILQ_HEAD_INITIALIZER(_dead_list);
976#else
977;
978#endif
979
980/* Initial thread: */
981SCLASS struct pthread *_thread_initial
982#ifdef GLOBAL_PTHREAD_PRIVATE
983= NULL;
984#else
985;
986#endif
987
988/* Default thread attributes: */
989SCLASS struct pthread_attr pthread_attr_default
990#ifdef GLOBAL_PTHREAD_PRIVATE
991= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY,
992 PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL,
993 PTHREAD_STACK_DEFAULT, PTHREAD_GUARD_DEFAULT };
994#else
995;
996#endif
997
998/* Default mutex attributes: */
999SCLASS struct pthread_mutex_attr pthread_mutexattr_default
1000#ifdef GLOBAL_PTHREAD_PRIVATE
1001= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
1002#else
1003;
1004#endif
1005
1006/* Default condition variable attributes: */
1007SCLASS struct pthread_cond_attr pthread_condattr_default
1008#ifdef GLOBAL_PTHREAD_PRIVATE
1009= { COND_TYPE_FAST, 0 };
1010#else
1011;
1012#endif
1013
1014/*
1015 * Standard I/O file descriptors need special flag treatment since
 1016 * setting one to non-blocking affects all of them on *BSD. Sigh. This array
1017 * is used to store the initial flag settings.
1018 */
1019SCLASS int _pthread_stdio_flags[3];
1020
1021/* File table information: */
1022SCLASS struct fd_table_entry **_thread_fd_table
1023#ifdef GLOBAL_PTHREAD_PRIVATE
1024= NULL;
1025#else
1026;
1027#endif
1028
1029/* Table for polling file descriptors: */
1030SCLASS struct pollfd *_thread_pfd_table
1031#ifdef GLOBAL_PTHREAD_PRIVATE
1032= NULL;
1033#else
1034;
1035#endif
1036
1037SCLASS const int dtablecount
1038#ifdef GLOBAL_PTHREAD_PRIVATE
1039= 4096/sizeof(struct fd_table_entry);
1040#else
1041;
1042#endif
1043SCLASS int _thread_dtablesize /* Descriptor table size. */
1044#ifdef GLOBAL_PTHREAD_PRIVATE
1045= 0;
1046#else
1047;
1048#endif
1049
1050SCLASS int _clock_res_usec /* Clock resolution in usec. */
1051#ifdef GLOBAL_PTHREAD_PRIVATE
1052= CLOCK_RES_USEC;
1053#else
1054;
1055#endif
1056
1057/* Garbage collector mutex and condition variable. */
1058SCLASS pthread_mutex_t _gc_mutex
1059#ifdef GLOBAL_PTHREAD_PRIVATE
1060= NULL
1061#endif
1062;
1063SCLASS pthread_cond_t _gc_cond
1064#ifdef GLOBAL_PTHREAD_PRIVATE
1065= NULL
1066#endif
1067;
1068
1069/*
1070 * Array of signal actions for this process.
1071 */
1072SCLASS struct sigaction _thread_sigact[NSIG];
1073
1074/*
1075 * Array of counts of dummy handlers for SIG_DFL signals. This is used to
1076 * assure that there is always a dummy signal handler installed while there is a
1077 * thread sigwait()ing on the corresponding signal.
1078 */
1079SCLASS int _thread_dfl_count[NSIG];
1080
1081/*
1082 * Pending signals and mask for this process:
1083 */
1084SCLASS sigset_t _process_sigpending;
1085SCLASS sigset_t _process_sigmask
1086#ifdef GLOBAL_PTHREAD_PRIVATE
1087= { {0, 0, 0, 0} }
1088#endif
1089;
1090
1091/*
1092 * Scheduling queues:
1093 */
1094SCLASS pq_queue_t _readyq;
1095SCLASS TAILQ_HEAD(, pthread) _waitingq;
1096
1097/*
1098 * Work queue:
1099 */
1100SCLASS TAILQ_HEAD(, pthread) _workq;
1101
1102/* Tracks the number of threads blocked while waiting for a spinlock. */
1103SCLASS volatile int _spinblock_count
1104#ifdef GLOBAL_PTHREAD_PRIVATE
1105= 0
1106#endif
1107;
1108
1109/* Used to maintain pending and active signals: */
1110struct sigstatus {
1111 int pending; /* Is this a pending signal? */
1112 int blocked; /*
1113 * A handler is currently active for
1114 * this signal; ignore subsequent
1115 * signals until the handler is done.
1116 */
1117 int signo; /* arg 1 to signal handler */
1118 siginfo_t siginfo; /* arg 2 to signal handler */
1119 ucontext_t uc; /* arg 3 to signal handler */
1120};
1121
1122SCLASS struct sigstatus _thread_sigq[NSIG];
1123
1124/* Indicates that the signal queue needs to be checked. */
1125SCLASS volatile int _sigq_check_reqd
1126#ifdef GLOBAL_PTHREAD_PRIVATE
1127= 0
1128#endif
1129;
1130
1131/* The signal stack. */
1132SCLASS struct sigaltstack _thread_sigstack;
1133
1134/* Thread switch hook. */
1135SCLASS pthread_switch_routine_t _sched_switch_hook
1136#ifdef GLOBAL_PTHREAD_PRIVATE
1137= NULL
1138#endif
1139;
1140
1141/*
1142 * Declare the kernel scheduler jump buffer and stack:
1143 */
1144SCLASS jmp_buf _thread_kern_sched_jb;
1145
1146SCLASS void * _thread_kern_sched_stack
1147#ifdef GLOBAL_PTHREAD_PRIVATE
1148= NULL
1149#endif
1150;
1151
1152
1153/* Used for _PTHREADS_INVARIANTS checking. */
1154SCLASS int _thread_kern_new_state
1155#ifdef GLOBAL_PTHREAD_PRIVATE
1156= 0
1157#endif
1158;
1159
1160/* Undefine the storage class specifier: */
1161#undef SCLASS
1162
1163#ifdef _LOCK_DEBUG
1164#define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \
1165 _ts, __FILE__, __LINE__)
1166#define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \
1167 __FILE__, __LINE__)
1168#else
1169#define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts)
1170#define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type)
1171#endif
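A sketch of the shape these macros give to the library's wrapped I/O calls; illustrative only, not the library's actual read() wrapper:

/* Sketch: serialize access to the descriptor around the real system call. */
ssize_t
example_read(int fd, void *buf, size_t nbytes)
{
	ssize_t ret;

	if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
		ret = __sys_read(fd, buf, nbytes);
		_FD_UNLOCK(fd, FD_READ);
	}
	return (ret);
}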
1172
1173/*
1174 * Function prototype definitions.
1175 */
1176__BEGIN_DECLS
1177char *__ttyname_basic(int);
1178char *__ttyname_r_basic(int, char *, size_t);
1179char *ttyname_r(int, char *, size_t);
1180void _cond_wait_backout(pthread_t);
1181void _fd_lock_backout(pthread_t);
1182int _find_thread(pthread_t);
1183struct pthread *_get_curthread(void);
1184void _set_curthread(struct pthread *);
1185void *_thread_stack_alloc(size_t, size_t);
1186void _thread_stack_free(void *, size_t, size_t);
1187int _thread_create(pthread_t *,const pthread_attr_t *,void *(*start_routine)(void *),void *,pthread_t);
1188int _thread_fd_lock(int, int, struct timespec *);
1189int _thread_fd_lock_debug(int, int, struct timespec *,char *fname,int lineno);
1190int _mutex_cv_lock(pthread_mutex_t *);
1191int _mutex_cv_unlock(pthread_mutex_t *);
1192void _mutex_lock_backout(pthread_t);
1193void _mutex_notify_priochange(pthread_t);
1194int _mutex_reinit(pthread_mutex_t *);
1195void _mutex_unlock_private(pthread_t);
1196int _cond_reinit(pthread_cond_t *);
1197int _pq_alloc(struct pq_queue *, int, int);
1198int _pq_init(struct pq_queue *);
1199void _pq_remove(struct pq_queue *pq, struct pthread *);
1200void _pq_insert_head(struct pq_queue *pq, struct pthread *);
1201void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
1202struct pthread *_pq_first(struct pq_queue *pq);
1203void *_pthread_getspecific(pthread_key_t);
1204int _pthread_key_create(pthread_key_t *, void (*) (void *));
1205int _pthread_key_delete(pthread_key_t);
1206int _pthread_mutex_destroy(pthread_mutex_t *);
1207int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
1208int _pthread_mutex_lock(pthread_mutex_t *);
1209int _pthread_mutex_trylock(pthread_mutex_t *);
1210int _pthread_mutex_unlock(pthread_mutex_t *);
1211int _pthread_mutexattr_init(pthread_mutexattr_t *);
1212int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
1213int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
1214int _pthread_once(pthread_once_t *, void (*) (void));
1215pthread_t _pthread_self(void);
1216int _pthread_setspecific(pthread_key_t, const void *);
1217void _waitq_insert(pthread_t pthread);
1218void _waitq_remove(pthread_t pthread);
1219#if defined(_PTHREADS_INVARIANTS)
1220void _waitq_setactive(void);
1221void _waitq_clearactive(void);
1222#endif
1223void _thread_exit(char *, int, char *);
1224void _thread_exit_cleanup(void);
1225void _thread_fd_unlock(int, int);
1226void _thread_fd_unlock_debug(int, int, char *, int);
1227void _thread_fd_unlock_owned(pthread_t);
1228void *_thread_cleanup(pthread_t);
1229void _thread_cleanupspecific(void);
1230void _thread_dump_info(void);
1231void _thread_init(void);
1232void _thread_kern_sched(ucontext_t *);
1233void _thread_kern_scheduler(void);
1234void _thread_kern_sched_frame(struct pthread_signal_frame *psf);
1235void _thread_kern_sched_sig(void);
1236void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
1237void _thread_kern_sched_state_unlock(enum pthread_state state,
1238 spinlock_t *lock, char *fname, int lineno);
1239void _thread_kern_set_timeout(const struct timespec *);
1240void _thread_kern_sig_defer(void);
1241void _thread_kern_sig_undefer(void);
1242void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
1243void _thread_sig_check_pending(pthread_t pthread);
1244void _thread_sig_handle_pending(void);
1245void _thread_sig_send(pthread_t pthread, int sig);
1246void _thread_sig_wrapper(void);
1247void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf);
1248void _thread_start(void);
1249void _thread_seterrno(pthread_t, int);
1250int _thread_fd_table_init(int fd);
1251pthread_addr_t _thread_gc(pthread_addr_t);
1252void _thread_enter_cancellation_point(void);
1253void _thread_leave_cancellation_point(void);
1254void _thread_cancellation_point(void);
1255
1256/* #include <sys/aio.h> */
1257#ifdef _SYS_AIO_H_
1258int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
1259#endif
1260
1261/* #include <signal.h> */
1262#ifdef _SIGNAL_H_
1263int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
1264int __sys_sigpending(sigset_t *);
1265int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
1266int __sys_sigsuspend(const sigset_t *);
1267int __sys_sigreturn(ucontext_t *);
1268int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
1269#endif
1270
1271/* #include <sys/stat.h> */
1272#ifdef _SYS_STAT_H_
1273int __sys_fchmod(int, mode_t);
1274int __sys_fstat(int, struct stat *);
1275int __sys_fchflags(int, u_long);
1276#endif
1277
1278/* #include <sys/mount.h> */
1279#ifdef _SYS_MOUNT_H_
1280int __sys_fstatfs(int, struct statfs *);
1281#endif
1282
 1283/* #include <sys/event.h> */
1284#ifdef _SYS_EVENT_H_
1285int __sys_kevent(int, const struct kevent *, int, struct kevent *,
1286 int, const struct timespec *);
1287#endif
1288
1289/* #include <sys/socket.h> */
1290#ifdef _SYS_SOCKET_H_
1291int __sys_accept(int, struct sockaddr *, int *);
1292int __sys_bind(int, const struct sockaddr *, int);
1293int __sys_connect(int, const struct sockaddr *, int);
1294int __sys_getpeername(int, struct sockaddr *, int *);
1295int __sys_getsockname(int, struct sockaddr *, int *);
1296int __sys_getsockopt(int, int, int, void *, int *);
1297int __sys_listen(int, int);
1298int __sys_setsockopt(int, int, int, const void *, int);
1299int __sys_shutdown(int, int);
1300int __sys_socket(int, int, int);
1301int __sys_socketpair(int, int, int, int *);
1302ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, int *);
1303ssize_t __sys_recvmsg(int, struct msghdr *, int);
1304ssize_t __sys_send(int, const void *, size_t, int);
1305int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int);
1306ssize_t __sys_sendmsg(int, const struct msghdr *, int);
1307ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, int);
1308#endif
1309
1310/* #include <unistd.h> */
1311#ifdef _UNISTD_H_
1312int __sys_close(int);
1313int __sys_dup(int);
1314int __sys_dup2(int, int);
1315int __sys_execve(const char *, char * const *, char * const *);
1316int __sys_fchown(int, uid_t, gid_t);
1317int __sys_fork(void);
1318int __sys_fsync(int);
1319int __sys_pipe(int *);
1320int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
1321long __sys_fpathconf(int, int);
1322ssize_t __sys_read(int, void *, size_t);
1323ssize_t __sys_write(int, const void *, size_t);
1324void __sys_exit(int);
1325#endif
1326
1327/* #include <fcntl.h> */
1328#ifdef _SYS_FCNTL_H_
1329int __sys_fcntl(int, int, ...);
1330int __sys_flock(int, int);
1331int __sys_open(const char *, int, ...);
1332#endif
1333
1334/* #include <sys/ioctl.h> */
1335#ifdef _SYS_IOCTL_H_
1336int __sys_ioctl(int, unsigned long, ...);
1337#endif
1338
1339/* #include <dirent.h> */
1340#ifdef _DIRENT_H_
1341int __sys_getdirentries(int, char *, int, long *);
1342#endif
1343
1344/* #include <sys/uio.h> */
1345#ifdef _SYS_UIO_H_
1346ssize_t __sys_readv(int, const struct iovec *, int);
1347ssize_t __sys_writev(int, const struct iovec *, int);
1348#endif
1349
1350/* #include <sys/wait.h> */
1351#ifdef WNOHANG
1352pid_t __sys_wait4(pid_t, int *, int, struct rusage *);
1353#endif
1354
1355/* #include <poll.h> */
1356#ifdef _SYS_POLL_H_
1357int __sys_poll(struct pollfd *, unsigned, int);
1358#endif
1359
1360/* #include <sys/mman.h> */
1361#ifdef _SYS_MMAN_H_
1362int __sys_msync(void *, size_t, int);
1363#endif
1364
1365/* #include <setjmp.h> */
1366#ifdef _SETJMP_H_
1367extern void __siglongjmp(sigjmp_buf, int) __dead2;
1368extern void __longjmp(jmp_buf, int) __dead2;
1369extern void ___longjmp(jmp_buf, int) __dead2;
1370#endif
1371
1372/* #include <sys/capability.h> */
1373#ifdef _SYS_CAPABILITY_H
1374int __sys___cap_get_fd(int, struct cap *);
1375int __sys___cap_set_fd(int, struct cap *);
1376#endif
1377
1378/* #include <sys/acl.h> */
1379#ifdef _SYS_ACL_H
1380int __sys___acl_aclcheck_fd(int, acl_type_t, struct acl *);
1381int __sys___acl_delete_fd(int, acl_type_t);
1382int __sys___acl_get_fd(int, acl_type_t, struct acl *);
1383int __sys___acl_set_fd(int, acl_type_t, struct acl *);
1384#endif
1385__END_DECLS
1386
1387#endif /* !_PTHREAD_PRIVATE_H */
579};
580
581/*
582 * Define a continuation routine that can be used to perform a
583 * transfer of control:
584 */
585typedef void (*thread_continuation_t) (void *);
586
587struct pthread_signal_frame;
588
589struct pthread_state_data {
590 struct pthread_signal_frame *psd_curframe;
591 sigset_t psd_sigmask;
592 struct timespec psd_wakeup_time;
593 union pthread_wait_data psd_wait_data;
594 enum pthread_state psd_state;
595 int psd_flags;
596 int psd_interrupted;
597 int psd_longjmp_val;
598 int psd_sigmask_seqno;
599 int psd_signo;
600 int psd_sig_defer_count;
601 /* XXX - What about thread->timeout and/or thread->error? */
602};
603
604
605/*
606 * Normally thread contexts are stored as jmp_bufs via _setjmp()/_longjmp(),
607 * but they may also be sigjmp_buf and ucontext_t. When a thread is
608 * interrupted by a signal, its context is saved as a ucontext_t. An
609 * application is also free to use [_]longjmp()/[_]siglongjmp() to jump
610 * between contexts within the same thread. Future support will also
611 * include setcontext()/getcontext().
612 *
613 * Define an enumerated type that can identify the 4 different context
614 * types.
615 */
616typedef enum {
617 CTX_JB_NOSIG, /* context is jmp_buf without saved sigset */
618 CTX_JB, /* context is jmp_buf (with saved sigset) */
619 CTX_SJB, /* context is sigjmp_buf (with saved sigset) */
620 CTX_UC /* context is ucontext_t (with saved sigset) */
621} thread_context_t;
622
623/*
624 * There are 2 basic contexts that a frame may contain at any
625 * one time:
626 *
627 * o ctx - The context that the thread should return to after normal
628 * completion of the signal handler.
629 * o sig_jb - The context just before the signal handler is invoked.
630 * Attempts at abnormal returns from user-supplied signal handlers
631 * will return back to the signal context to perform any necessary
632 * cleanup.
633 */
634struct pthread_signal_frame {
635 /*
636 * This stores the thread's state before the signal.
637 */
638 struct pthread_state_data saved_state;
639
640 /*
641 * Thread's return context; ctxtype identifies the type of context.
642 * For signal frame 0, these point to the context storage area
643 * within the pthread structure. When handling signals (frame > 0),
644 * these point to a context storage area that is allocated off the
645 * thread's stack.
646 */
647 union {
648 jmp_buf jb;
649 sigjmp_buf sigjb;
650 ucontext_t uc;
651 } ctx;
652 thread_context_t ctxtype;
653 int longjmp_val;
654 int signo; /* signal, arg 1 to sighandler */
655 int sig_has_args; /* use signal args if true */
656 ucontext_t uc;
657 siginfo_t siginfo;
658};
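/*
 * Illustrative sketch only (not part of the uthread API): one way the
 * saved context of a frame could be resumed according to its ctxtype.
 * The standard _longjmp()/siglongjmp() calls are used here for brevity;
 * restoring the CTX_JB signal mask and resuming a CTX_UC context are
 * only noted in comments, since the kernel uses its own entry points.
 */
static __inline void
_example_resume_frame(struct pthread_signal_frame *psf)
{
	switch (psf->ctxtype) {
	case CTX_JB_NOSIG:	/* jmp_buf, no saved signal mask */
	case CTX_JB:		/* jmp_buf; mask would be restored separately */
		_longjmp(psf->ctx.jb, psf->longjmp_val);
		break;
	case CTX_SJB:		/* sigjmp_buf with saved signal mask */
		siglongjmp(psf->ctx.sigjb, psf->longjmp_val);
		break;
	case CTX_UC:		/* ucontext_t, e.g. saved by a signal */
		/* Resumed via sigreturn()/setcontext() when available. */
		break;
	}
}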
659
660/*
661 * Thread structure.
662 */
663struct pthread {
664 /*
665 * Magic value to help recognize a valid thread structure
666 * from an invalid one:
667 */
668#define PTHREAD_MAGIC ((u_int32_t) 0xd09ba115)
669 u_int32_t magic;
670 char *name;
671 u_int64_t uniqueid; /* for gdb */
672
673 /*
674 * Lock for accesses to this thread structure.
675 */
676 spinlock_t lock;
677
678 /* Queue entry for list of all threads: */
679 TAILQ_ENTRY(pthread) tle;
680
681 /* Queue entry for list of dead threads: */
682 TAILQ_ENTRY(pthread) dle;
683
684 /*
685 * Thread start routine, argument, stack pointer and thread
686 * attributes.
687 */
688 void *(*start_routine)(void *);
689 void *arg;
690 void *stack;
691 struct pthread_attr attr;
692
693 /*
694 * Thread's return context; ctxtype identifies the type of context.
695 */
696 union {
697 jmp_buf jb;
698 sigjmp_buf sigjb;
699 ucontext_t uc;
700 } ctx;
701 thread_context_t ctxtype;
702 int longjmp_val;
703
704 /*
705 * Used for tracking delivery of signal handlers.
706 */
707 struct pthread_signal_frame *curframe;
708
709 /*
710 * Cancelability flags - the lower 2 bits are used by cancel
711 * definitions in pthread.h
712 */
713#define PTHREAD_AT_CANCEL_POINT 0x0004
714#define PTHREAD_CANCELLING 0x0008
715#define PTHREAD_CANCEL_NEEDED 0x0010
716 int cancelflags;
717
718 enum pthread_susp suspended;
719
720 thread_continuation_t continuation;
721
722 /*
723 * Current signal mask and pending signals.
724 */
725 sigset_t sigmask;
726 sigset_t sigpend;
727 int sigmask_seqno;
728 int check_pending;
729
730 /* Thread state: */
731 enum pthread_state state;
732
733 /* Scheduling clock when this thread was last made active. */
734 long last_active;
735
736 /* Scheduling clock when this thread was last made inactive. */
737 long last_inactive;
738
739 /*
740 * Number of microseconds accumulated by this thread when
741 * time slicing is active.
742 */
743 long slice_usec;
744
745 /*
746 * Time to wake up thread. This is used for sleeping threads and
747 * for any operation which may time out (such as select).
748 */
749 struct timespec wakeup_time;
750
751 /* TRUE if operation has timed out. */
752 int timeout;
753
754 /*
755 * Error variable used instead of errno. The function __error()
756 * returns a pointer to this.
757 */
758 int error;
759
760 /* Pointer to a thread that is waiting to join (NULL if no joiner). */
761 struct pthread *joiner;
762
763 /*
764 * The current thread can belong to only one scheduling queue at
765 * a time (ready or waiting queue). It can also belong to:
766 *
767 * o A queue of threads waiting for a mutex
768 * o A queue of threads waiting for a condition variable
769 * o A queue of threads waiting for a file descriptor lock
770 * o A queue of threads needing work done by the kernel thread
771 * (waiting for a spinlock or file I/O)
772 *
773 * A thread can also be joining a thread (the joiner field above).
774 *
775 * It must not be possible for a thread to belong to any of the
776 * above queues while it is handling a signal. Signal handlers
777 * may longjmp back to previous stack frames, circumventing normal
778 * control flow. This could corrupt queue integrity if the thread
779 * retains membership in the queue. Therefore, if a thread is a
780 * member of one of these queues when a signal handler is invoked,
781 * it must remove itself from the queue before calling the signal
782 * handler and reinsert itself after normal return of the handler.
783 *
784 * Use pqe for the scheduling queue link (both ready and waiting),
785 * sqe for synchronization (mutex and condition variable) queue
786 * links, and qe for all other links.
787 */
788 TAILQ_ENTRY(pthread) pqe; /* priority queue link */
789 TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
790 TAILQ_ENTRY(pthread) qe; /* all other queues link */
791
792 /* Wait data. */
793 union pthread_wait_data data;
794
795 /*
796 * Allocated for converting select into poll.
797 */
798 struct pthread_poll_data poll_data;
799
800 /*
801 * Set to TRUE if a blocking operation was
802 * interrupted by a signal:
803 */
804 int interrupted;
805
806 /* Signal number when in state PS_SIGWAIT: */
807 int signo;
808
809 /*
810 * Set to non-zero when this thread has deferred signals.
811 * We allow for recursive deferral.
812 */
813 int sig_defer_count;
814
815 /*
816 * Set to TRUE if this thread should yield after undeferring
817 * signals.
818 */
819 int yield_on_sig_undefer;
820
821 /* Miscellaneous flags; only set with signals deferred. */
822 int flags;
823#define PTHREAD_FLAGS_PRIVATE 0x0001
824#define PTHREAD_EXITING 0x0002
825#define PTHREAD_FLAGS_IN_WAITQ 0x0004 /* in waiting queue using pqe link */
826#define PTHREAD_FLAGS_IN_PRIOQ 0x0008 /* in priority queue using pqe link */
827#define PTHREAD_FLAGS_IN_WORKQ 0x0010 /* in work queue using qe link */
828#define PTHREAD_FLAGS_IN_FILEQ 0x0020 /* in file lock queue using qe link */
829#define PTHREAD_FLAGS_IN_FDQ 0x0040 /* in fd lock queue using qe link */
830#define PTHREAD_FLAGS_IN_CONDQ 0x0080 /* in condition queue using sqe link*/
831#define PTHREAD_FLAGS_IN_MUTEXQ 0x0100 /* in mutex queue using sqe link */
832#define PTHREAD_FLAGS_TRACE 0x0200 /* for debugging purposes */
833#define PTHREAD_FLAGS_IN_SYNCQ \
834 (PTHREAD_FLAGS_IN_CONDQ | PTHREAD_FLAGS_IN_MUTEXQ)
835
836 /*
837 * Base priority is the user-settable and retrievable priority
838 * of the thread. It is only affected by explicit calls to
839 * set thread priority and upon thread creation via a thread
840 * attribute or default priority.
841 */
842 char base_priority;
843
844 /*
845 * Inherited priority is the priority a thread inherits by
846 * taking a priority inheritance or protection mutex. It
847 * is not affected by base priority changes. Inherited
848 * priority defaults to and remains 0 until a mutex is taken
849 * that is being waited on by any other thread whose priority
850 * is non-zero.
851 */
852 char inherited_priority;
853
854 /*
855 * Active priority is always the maximum of the thread's base
856 * priority and inherited priority. When there is a change
857 * in either the base or inherited priority, the active
858 * priority must be recalculated.
859 */
860 char active_priority;
861
862 /* Number of priority ceiling or protection mutexes owned. */
863 int priority_mutex_count;
864
865 /*
866 * Queue of currently owned mutexes.
867 */
868 TAILQ_HEAD(, pthread_mutex) mutexq;
869
870 void *ret;
871 const void **specific_data;
872 int specific_data_count;
873
874 /* Linked list of cleanup handlers: */
875 struct pthread_cleanup *cleanup;
876 char *fname; /* Ptr to source file name */
877 int lineno; /* Source line number. */
878};
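/*
 * Illustrative sketch only: the active priority described above is
 * simply the maximum of the base and inherited priorities.  The helper
 * name is hypothetical; the real updates are made by the mutex and
 * scheduling code whenever either component changes.
 */
static __inline void
_example_recalc_active_priority(struct pthread *pthread)
{
	pthread->active_priority =
	    (pthread->inherited_priority > pthread->base_priority) ?
	    pthread->inherited_priority : pthread->base_priority;
}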
879
880/*
881 * Global variables for the uthread kernel.
882 */
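/*
 * Each variable below is declared with the SCLASS storage-class macro
 * (defined earlier in this file and undefined again at the end of this
 * block).  In the single compilation unit that defines
 * GLOBAL_PTHREAD_PRIVATE, the "#ifdef GLOBAL_PTHREAD_PRIVATE"
 * initializers are used and storage is actually allocated; every other
 * file sees only external declarations.
 */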
883
884/* Kernel thread structure used when there are no running threads: */
885SCLASS struct pthread _thread_kern_thread;
886
887/* Ptr to the thread structure for the running thread: */
888SCLASS struct pthread * volatile _thread_run
889#ifdef GLOBAL_PTHREAD_PRIVATE
890= &_thread_kern_thread;
891#else
892;
893#endif
894
895/* Ptr to the thread structure for the last user thread to run: */
896SCLASS struct pthread * volatile _last_user_thread
897#ifdef GLOBAL_PTHREAD_PRIVATE
898= &_thread_kern_thread;
899#else
900;
901#endif
902
903/*
904 * Ptr to the thread running in single-threaded mode or NULL if
905 * running multi-threaded (default POSIX behaviour).
906 */
907SCLASS struct pthread * volatile _thread_single
908#ifdef GLOBAL_PTHREAD_PRIVATE
909= NULL;
910#else
911;
912#endif
913
914/* List of all threads: */
915SCLASS TAILQ_HEAD(, pthread) _thread_list
916#ifdef GLOBAL_PTHREAD_PRIVATE
917= TAILQ_HEAD_INITIALIZER(_thread_list);
918#else
919;
920#endif
921
922/*
923 * Array of kernel pipe file descriptors that are used to ensure that
924 * no signals are missed in calls to _select.
925 */
926SCLASS int _thread_kern_pipe[2]
927#ifdef GLOBAL_PTHREAD_PRIVATE
928= {
929 -1,
930 -1
931};
932#else
933;
934#endif
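/*
 * Illustrative sketch only: the classic "self-pipe" wakeup that the
 * descriptors above make possible.  Writing a byte from signal context
 * forces any select()/poll() that watches the read end to return, so a
 * signal arriving just before the scheduler blocks cannot be lost.  The
 * helper name is hypothetical; __sys_write() is declared later in this
 * file, and the prototype is repeated here so the sketch stands alone.
 */
ssize_t	__sys_write(int, const void *, size_t);

static __inline void
_example_kern_pipe_wakeup(void)
{
	char	c = '\0';

	if (_thread_kern_pipe[1] != -1)
		(void)__sys_write(_thread_kern_pipe[1], &c, 1);
}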
935SCLASS int volatile _queue_signals
936#ifdef GLOBAL_PTHREAD_PRIVATE
937= 0;
938#else
939;
940#endif
941SCLASS int _thread_kern_in_sched
942#ifdef GLOBAL_PTHREAD_PRIVATE
943= 0;
944#else
945;
946#endif
947
948SCLASS int _sig_in_handler
949#ifdef GLOBAL_PTHREAD_PRIVATE
950= 0;
951#else
952;
953#endif
954
955/* Time of day at last scheduling timer signal: */
956SCLASS struct timeval volatile _sched_tod
957#ifdef GLOBAL_PTHREAD_PRIVATE
958= { 0, 0 };
959#else
960;
961#endif
962
963/*
964 * Current scheduling timer ticks; used as resource usage.
965 */
966SCLASS unsigned int volatile _sched_ticks
967#ifdef GLOBAL_PTHREAD_PRIVATE
968= 0;
969#else
970;
971#endif
972
973/* Dead threads: */
974SCLASS TAILQ_HEAD(, pthread) _dead_list
975#ifdef GLOBAL_PTHREAD_PRIVATE
976= TAILQ_HEAD_INITIALIZER(_dead_list);
977#else
978;
979#endif
980
981/* Initial thread: */
982SCLASS struct pthread *_thread_initial
983#ifdef GLOBAL_PTHREAD_PRIVATE
984= NULL;
985#else
986;
987#endif
988
989/* Default thread attributes: */
990SCLASS struct pthread_attr pthread_attr_default
991#ifdef GLOBAL_PTHREAD_PRIVATE
992= { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY,
993 PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL,
994 PTHREAD_STACK_DEFAULT, PTHREAD_GUARD_DEFAULT };
995#else
996;
997#endif
998
999/* Default mutex attributes: */
1000SCLASS struct pthread_mutex_attr pthread_mutexattr_default
1001#ifdef GLOBAL_PTHREAD_PRIVATE
1002= { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 };
1003#else
1004;
1005#endif
1006
1007/* Default condition variable attributes: */
1008SCLASS struct pthread_cond_attr pthread_condattr_default
1009#ifdef GLOBAL_PTHREAD_PRIVATE
1010= { COND_TYPE_FAST, 0 };
1011#else
1012;
1013#endif
1014
1015/*
1016 * Standard I/O file descriptors need special flag treatment since
1017 * setting one of them to non-blocking affects them all on *BSD. Sigh.
1018 * This array is used to store the initial flag settings.
1019 */
1020SCLASS int _pthread_stdio_flags[3];
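/*
 * Illustrative sketch only: capturing the original flags of the three
 * standard descriptors with fcntl(F_GETFL), which is what the array
 * above stores.  Guarded on <fcntl.h> for F_GETFL; __sys_fcntl() is
 * declared later in this file, and the prototype is repeated here so
 * the sketch stands alone.  The helper name is hypothetical.
 */
#ifdef _SYS_FCNTL_H_
int	__sys_fcntl(int, int, ...);

static __inline void
_example_save_stdio_flags(void)
{
	int	fd;

	for (fd = 0; fd < 3; fd++)
		_pthread_stdio_flags[fd] = __sys_fcntl(fd, F_GETFL, 0);
}
#endif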
1021
1022/* File table information: */
1023SCLASS struct fd_table_entry **_thread_fd_table
1024#ifdef GLOBAL_PTHREAD_PRIVATE
1025= NULL;
1026#else
1027;
1028#endif
1029
1030/* Table for polling file descriptors: */
1031SCLASS struct pollfd *_thread_pfd_table
1032#ifdef GLOBAL_PTHREAD_PRIVATE
1033= NULL;
1034#else
1035;
1036#endif
1037
1038SCLASS const int dtablecount
1039#ifdef GLOBAL_PTHREAD_PRIVATE
1040= 4096/sizeof(struct fd_table_entry);
1041#else
1042;
1043#endif
1044SCLASS int _thread_dtablesize /* Descriptor table size. */
1045#ifdef GLOBAL_PTHREAD_PRIVATE
1046= 0;
1047#else
1048;
1049#endif
1050
1051SCLASS int _clock_res_usec /* Clock resolution in usec. */
1052#ifdef GLOBAL_PTHREAD_PRIVATE
1053= CLOCK_RES_USEC;
1054#else
1055;
1056#endif
1057
1058/* Garbage collector mutex and condition variable. */
1059SCLASS pthread_mutex_t _gc_mutex
1060#ifdef GLOBAL_PTHREAD_PRIVATE
1061= NULL
1062#endif
1063;
1064SCLASS pthread_cond_t _gc_cond
1065#ifdef GLOBAL_PTHREAD_PRIVATE
1066= NULL
1067#endif
1068;
1069
1070/*
1071 * Array of signal actions for this process.
1072 */
1073SCLASS struct sigaction _thread_sigact[NSIG];
1074
1075/*
1076 * Array of counts of dummy handlers for SIG_DFL signals. This is used to
1077 * ensure that a dummy signal handler stays installed while a thread is
1078 * sigwait()ing on the corresponding signal.
1079 */
1080SCLASS int _thread_dfl_count[NSIG];
1081
1082/*
1083 * Pending signals and mask for this process:
1084 */
1085SCLASS sigset_t _process_sigpending;
1086SCLASS sigset_t _process_sigmask
1087#ifdef GLOBAL_PTHREAD_PRIVATE
1088= { {0, 0, 0, 0} }
1089#endif
1090;
1091
1092/*
1093 * Scheduling queues:
1094 */
1095SCLASS pq_queue_t _readyq;
1096SCLASS TAILQ_HEAD(, pthread) _waitingq;
1097
1098/*
1099 * Work queue:
1100 */
1101SCLASS TAILQ_HEAD(, pthread) _workq;
1102
1103/* Tracks the number of threads blocked while waiting for a spinlock. */
1104SCLASS volatile int _spinblock_count
1105#ifdef GLOBAL_PTHREAD_PRIVATE
1106= 0
1107#endif
1108;
1109
1110/* Used to maintain pending and active signals: */
1111struct sigstatus {
1112 int pending; /* Is this a pending signal? */
1113 int blocked; /*
1114 * A handler is currently active for
1115 * this signal; ignore subsequent
1116 * signals until the handler is done.
1117 */
1118 int signo; /* arg 1 to signal handler */
1119 siginfo_t siginfo; /* arg 2 to signal handler */
1120 ucontext_t uc; /* arg 3 to signal handler */
1121};
1122
1123SCLASS struct sigstatus _thread_sigq[NSIG];
1124
1125/* Indicates that the signal queue needs to be checked. */
1126SCLASS volatile int _sigq_check_reqd
1127#ifdef GLOBAL_PTHREAD_PRIVATE
1128= 0
1129#endif
1130;
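/*
 * Illustrative sketch only: roughly what the process signal handler has
 * to do with the queue above: record the signal, its siginfo and
 * context, and tell the scheduler that the queue needs attention.  The
 * helper name and the 1-based indexing shown here are assumptions.
 */
static __inline void
_example_queue_signal(int sig, siginfo_t *info, ucontext_t *ucp)
{
	if (sig >= 1 && sig < NSIG) {
		_thread_sigq[sig - 1].signo = sig;
		if (info != NULL)
			_thread_sigq[sig - 1].siginfo = *info;
		if (ucp != NULL)
			_thread_sigq[sig - 1].uc = *ucp;
		_thread_sigq[sig - 1].pending = 1;
		_sigq_check_reqd = 1;
	}
}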
1131
1132/* The signal stack. */
1133SCLASS struct sigaltstack _thread_sigstack;
1134
1135/* Thread switch hook. */
1136SCLASS pthread_switch_routine_t _sched_switch_hook
1137#ifdef GLOBAL_PTHREAD_PRIVATE
1138= NULL
1139#endif
1140;
1141
1142/*
1143 * Declare the kernel scheduler jump buffer and stack:
1144 */
1145SCLASS jmp_buf _thread_kern_sched_jb;
1146
1147SCLASS void * _thread_kern_sched_stack
1148#ifdef GLOBAL_PTHREAD_PRIVATE
1149= NULL
1150#endif
1151;
1152
1153
1154/* Used for _PTHREADS_INVARIANTS checking. */
1155SCLASS int _thread_kern_new_state
1156#ifdef GLOBAL_PTHREAD_PRIVATE
1157= 0
1158#endif
1159;
1160
1161/* Undefine the storage class specifier: */
1162#undef SCLASS
1163
1164#ifdef _LOCK_DEBUG
1165#define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock_debug(_fd, _type, \
1166 _ts, __FILE__, __LINE__)
1167#define _FD_UNLOCK(_fd,_type) _thread_fd_unlock_debug(_fd, _type, \
1168 __FILE__, __LINE__)
1169#else
1170#define _FD_LOCK(_fd,_type,_ts) _thread_fd_lock(_fd, _type, _ts)
1171#define _FD_UNLOCK(_fd,_type) _thread_fd_unlock(_fd, _type)
1172#endif
1173
1174/*
1175 * Function prototype definitions.
1176 */
1177__BEGIN_DECLS
1178char *__ttyname_basic(int);
1179char *__ttyname_r_basic(int, char *, size_t);
1180char *ttyname_r(int, char *, size_t);
1181void _cond_wait_backout(pthread_t);
1182void _fd_lock_backout(pthread_t);
1183int _find_thread(pthread_t);
1184struct pthread *_get_curthread(void);
1185void _set_curthread(struct pthread *);
1186void *_thread_stack_alloc(size_t, size_t);
1187void _thread_stack_free(void *, size_t, size_t);
1188int _thread_create(pthread_t *, const pthread_attr_t *, void *(*start_routine)(void *), void *, pthread_t);
1189int _thread_fd_lock(int, int, struct timespec *);
1190int _thread_fd_lock_debug(int, int, struct timespec *, char *fname, int lineno);
1191int _mutex_cv_lock(pthread_mutex_t *);
1192int _mutex_cv_unlock(pthread_mutex_t *);
1193void _mutex_lock_backout(pthread_t);
1194void _mutex_notify_priochange(pthread_t);
1195int _mutex_reinit(pthread_mutex_t *);
1196void _mutex_unlock_private(pthread_t);
1197int _cond_reinit(pthread_cond_t *);
1198int _pq_alloc(struct pq_queue *, int, int);
1199int _pq_init(struct pq_queue *);
1200void _pq_remove(struct pq_queue *pq, struct pthread *);
1201void _pq_insert_head(struct pq_queue *pq, struct pthread *);
1202void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
1203struct pthread *_pq_first(struct pq_queue *pq);
1204void *_pthread_getspecific(pthread_key_t);
1205int _pthread_key_create(pthread_key_t *, void (*) (void *));
1206int _pthread_key_delete(pthread_key_t);
1207int _pthread_mutex_destroy(pthread_mutex_t *);
1208int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
1209int _pthread_mutex_lock(pthread_mutex_t *);
1210int _pthread_mutex_trylock(pthread_mutex_t *);
1211int _pthread_mutex_unlock(pthread_mutex_t *);
1212int _pthread_mutexattr_init(pthread_mutexattr_t *);
1213int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
1214int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
1215int _pthread_once(pthread_once_t *, void (*) (void));
1216pthread_t _pthread_self(void);
1217int _pthread_setspecific(pthread_key_t, const void *);
1218void _waitq_insert(pthread_t pthread);
1219void _waitq_remove(pthread_t pthread);
1220#if defined(_PTHREADS_INVARIANTS)
1221void _waitq_setactive(void);
1222void _waitq_clearactive(void);
1223#endif
1224void _thread_exit(char *, int, char *);
1225void _thread_exit_cleanup(void);
1226void _thread_fd_unlock(int, int);
1227void _thread_fd_unlock_debug(int, int, char *, int);
1228void _thread_fd_unlock_owned(pthread_t);
1229void *_thread_cleanup(pthread_t);
1230void _thread_cleanupspecific(void);
1231void _thread_dump_info(void);
1232void _thread_init(void);
1233void _thread_kern_sched(ucontext_t *);
1234void _thread_kern_scheduler(void);
1235void _thread_kern_sched_frame(struct pthread_signal_frame *psf);
1236void _thread_kern_sched_sig(void);
1237void _thread_kern_sched_state(enum pthread_state, char *fname, int lineno);
1238void _thread_kern_sched_state_unlock(enum pthread_state state,
1239 spinlock_t *lock, char *fname, int lineno);
1240void _thread_kern_set_timeout(const struct timespec *);
1241void _thread_kern_sig_defer(void);
1242void _thread_kern_sig_undefer(void);
1243void _thread_sig_handler(int, siginfo_t *, ucontext_t *);
1244void _thread_sig_check_pending(pthread_t pthread);
1245void _thread_sig_handle_pending(void);
1246void _thread_sig_send(pthread_t pthread, int sig);
1247void _thread_sig_wrapper(void);
1248void _thread_sigframe_restore(pthread_t thread, struct pthread_signal_frame *psf);
1249void _thread_start(void);
1250void _thread_seterrno(pthread_t, int);
1251int _thread_fd_table_init(int fd);
1252pthread_addr_t _thread_gc(pthread_addr_t);
1253void _thread_enter_cancellation_point(void);
1254void _thread_leave_cancellation_point(void);
1255void _thread_cancellation_point(void);
1256
1257/* #include <sys/aio.h> */
1258#ifdef _SYS_AIO_H_
1259int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
1260#endif
1261
1262/* #include <signal.h> */
1263#ifdef _SIGNAL_H_
1264int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
1265int __sys_sigpending(sigset_t *);
1266int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
1267int __sys_sigsuspend(const sigset_t *);
1268int __sys_sigreturn(ucontext_t *);
1269int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
1270#endif
1271
1272/* #include <sys/stat.h> */
1273#ifdef _SYS_STAT_H_
1274int __sys_fchmod(int, mode_t);
1275int __sys_fstat(int, struct stat *);
1276int __sys_fchflags(int, u_long);
1277#endif
1278
1279/* #include <sys/mount.h> */
1280#ifdef _SYS_MOUNT_H_
1281int __sys_fstatfs(int, struct statfs *);
1282#endif
1283
1284/* #include <sys/event.h> */
1285#ifdef _SYS_EVENT_H_
1286int __sys_kevent(int, const struct kevent *, int, struct kevent *,
1287 int, const struct timespec *);
1288#endif
1289
1290/* #include <sys/socket.h> */
1291#ifdef _SYS_SOCKET_H_
1292int __sys_accept(int, struct sockaddr *, int *);
1293int __sys_bind(int, const struct sockaddr *, int);
1294int __sys_connect(int, const struct sockaddr *, int);
1295int __sys_getpeername(int, struct sockaddr *, int *);
1296int __sys_getsockname(int, struct sockaddr *, int *);
1297int __sys_getsockopt(int, int, int, void *, int *);
1298int __sys_listen(int, int);
1299int __sys_setsockopt(int, int, int, const void *, int);
1300int __sys_shutdown(int, int);
1301int __sys_socket(int, int, int);
1302int __sys_socketpair(int, int, int, int *);
1303ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, int *);
1304ssize_t __sys_recvmsg(int, struct msghdr *, int);
1305ssize_t __sys_send(int, const void *, size_t, int);
1306int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, off_t *, int);
1307ssize_t __sys_sendmsg(int, const struct msghdr *, int);
1308ssize_t __sys_sendto(int, const void *, size_t, int, const struct sockaddr *, int);
1309#endif
1310
1311/* #include <unistd.h> */
1312#ifdef _UNISTD_H_
1313int __sys_close(int);
1314int __sys_dup(int);
1315int __sys_dup2(int, int);
1316int __sys_execve(const char *, char * const *, char * const *);
1317int __sys_fchown(int, uid_t, gid_t);
1318int __sys_fork(void);
1319int __sys_fsync(int);
1320int __sys_pipe(int *);
1321int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
1322long __sys_fpathconf(int, int);
1323ssize_t __sys_read(int, void *, size_t);
1324ssize_t __sys_write(int, const void *, size_t);
1325void __sys_exit(int);
1326#endif
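/*
 * Illustrative sketch only: how the pieces declared above typically fit
 * together in a blocking system call wrapper.  The descriptor is locked
 * around the I/O with _FD_LOCK()/_FD_UNLOCK() and the whole operation
 * is bracketed by the cancellation point hooks.  The wrapper name is
 * hypothetical, and the FD_READ lock type is assumed from earlier in
 * this file.
 */
#ifdef _UNISTD_H_
static __inline ssize_t
_example_cancellable_read(int fd, void *buf, size_t nbytes)
{
	ssize_t	ret;

	_thread_enter_cancellation_point();
	/* Lock the descriptor, do the I/O, then release the lock: */
	if ((ret = _FD_LOCK(fd, FD_READ, NULL)) == 0) {
		ret = __sys_read(fd, buf, nbytes);
		_FD_UNLOCK(fd, FD_READ);
	}
	_thread_leave_cancellation_point();
	return (ret);
}
#endif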
1327
1328/* #include <fcntl.h> */
1329#ifdef _SYS_FCNTL_H_
1330int __sys_fcntl(int, int, ...);
1331int __sys_flock(int, int);
1332int __sys_open(const char *, int, ...);
1333#endif
1334
1335/* #include <sys/ioctl.h> */
1336#ifdef _SYS_IOCTL_H_
1337int __sys_ioctl(int, unsigned long, ...);
1338#endif
1339
1340/* #include <dirent.h> */
1341#ifdef _DIRENT_H_
1342int __sys_getdirentries(int, char *, int, long *);
1343#endif
1344
1345/* #include <sys/uio.h> */
1346#ifdef _SYS_UIO_H_
1347ssize_t __sys_readv(int, const struct iovec *, int);
1348ssize_t __sys_writev(int, const struct iovec *, int);
1349#endif
1350
1351/* #include <sys/wait.h> */
1352#ifdef WNOHANG
1353pid_t __sys_wait4(pid_t, int *, int, struct rusage *);
1354#endif
1355
1356/* #include <poll.h> */
1357#ifdef _SYS_POLL_H_
1358int __sys_poll(struct pollfd *, unsigned, int);
1359#endif
1360
1361/* #include <sys/mman.h> */
1362#ifdef _SYS_MMAN_H_
1363int __sys_msync(void *, size_t, int);
1364#endif
1365
1366/* #include <setjmp.h> */
1367#ifdef _SETJMP_H_
1368extern void __siglongjmp(sigjmp_buf, int) __dead2;
1369extern void __longjmp(jmp_buf, int) __dead2;
1370extern void ___longjmp(jmp_buf, int) __dead2;
1371#endif
1372
1373/* #include <sys/capability.h> */
1374#ifdef _SYS_CAPABILITY_H
1375int __sys___cap_get_fd(int, struct cap *);
1376int __sys___cap_set_fd(int, struct cap *);
1377#endif
1378
1379/* #include <sys/acl.h> */
1380#ifdef _SYS_ACL_H
1381int __sys___acl_aclcheck_fd(int, acl_type_t, struct acl *);
1382int __sys___acl_delete_fd(int, acl_type_t);
1383int __sys___acl_get_fd(int, acl_type_t, struct acl *);
1384int __sys___acl_set_fd(int, acl_type_t, struct acl *);
1385#endif
1386__END_DECLS
1387
1388#endif /* !_PTHREAD_PRIVATE_H */