thr_private.h (139023) → thr_private.h (141822)
/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Private thread definitions for the uthread kernel.
 *
 * $FreeBSD: head/lib/libkse/thread/thr_private.h 141822 2005-02-13 18:38:06Z deischen $
 */

#ifndef _THR_PRIVATE_H
#define _THR_PRIVATE_H

/*
 * Include files.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/cdefs.h>
#include <sys/kse.h>
#include <sched.h>
#include <ucontext.h>
#include <unistd.h>
#include <pthread.h>
#include <pthread_np.h>

#ifndef LIBTHREAD_DB
#include "lock.h"
#include "pthread_md.h"
#endif

/*
 * Evaluate the storage class specifier.
 */
#ifdef GLOBAL_PTHREAD_PRIVATE
#define SCLASS
#define SCLASS_PRESET(x...)	= x
#else
#define SCLASS			extern
#define SCLASS_PRESET(x...)
#endif

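/*
 * Illustrative sketch (not part of the original header): the SCLASS
 * trick lets the globals declared below be *defined* in exactly one
 * translation unit and merely *declared* everywhere else.  A
 * hypothetical consumer would look like this:
 *
 *	// thr_globals.c -- the one file that owns the globals (made-up name)
 *	#define GLOBAL_PTHREAD_PRIVATE
 *	#include "thr_private.h"	// SCLASS empty: definitions + presets
 *
 *	// every other .c file
 *	#include "thr_private.h"	// SCLASS == extern: declarations only
 *
 * With GLOBAL_PTHREAD_PRIVATE, "SCLASS int _libkse_debug SCLASS_PRESET(0);"
 * expands to "int _libkse_debug = 0;"; without it, to
 * "extern int _libkse_debug;".
 */
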
/*
 * Kernel fatal error handler macro.
 */
#define PANIC(string)		_thr_exit(__FILE__, __LINE__, string)


/*
 * Output debug messages like this:
 *	stdout_debug("Some text, expressing %s", "something");
 */
#define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
#define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)

#define DBG_MUTEX	0x0001
#define DBG_SIG		0x0002

#ifdef _PTHREADS_INVARIANTS
#define THR_ASSERT(cond, msg) do {	\
	if (!(cond))			\
		PANIC(msg);		\
} while (0)
#else
#define THR_ASSERT(cond, msg)
#endif

/*
 * State change macro without scheduling queue change:
 */
#define THR_SET_STATE(thrd, newstate) do {	\
	(thrd)->state = newstate;		\
	(thrd)->fname = __FILE__;		\
	(thrd)->lineno = __LINE__;		\
} while (0)


#define	TIMESPEC_ADD(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
		if ((dst)->tv_nsec >= 1000000000) {		\
			(dst)->tv_sec++;			\
			(dst)->tv_nsec -= 1000000000;		\
		}						\
	} while (0)

#define	TIMESPEC_SUB(dst, src, val)				\
	do {							\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
		if ((dst)->tv_nsec < 0) {			\
			(dst)->tv_sec--;			\
			(dst)->tv_nsec += 1000000000;		\
		}						\
	} while (0)

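/*
 * Illustrative sketch (not part of the original header): the typical
 * use of TIMESPEC_ADD is converting a relative timeout into the
 * absolute wakeup time that the scheduler compares against.  The
 * function name and parameters here are hypothetical.
 */
static inline void
example_abs_timeout(struct timespec *wakeup, const struct timespec *now,
    const struct timespec *timeout)
{
	/* wakeup = now + timeout, tv_nsec renormalized into [0, 1e9). */
	TIMESPEC_ADD(wakeup, now, timeout);
}
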
/*
 * Priority queues.
 *
 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch].
 */
typedef struct pq_list {
	TAILQ_HEAD(, pthread)	pl_head;	/* list of threads at this priority */
	TAILQ_ENTRY(pq_list)	pl_link;	/* link for queue of priority lists */
	int			pl_prio;	/* the priority of this list */
	int			pl_queued;	/* is this in the priority queue */
} pq_list_t;

typedef struct pq_queue {
	TAILQ_HEAD(, pq_list)	pq_queue;	/* queue of priority lists */
	pq_list_t		*pq_lists;	/* array of all priority lists */
	int			pq_size;	/* number of priority lists */
#define	PQF_ACTIVE	0x0001
	int			pq_flags;
	int			pq_threads;
} pq_queue_t;

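/*
 * Illustrative sketch (not part of the original header): pq_lists is
 * an array with one pq_list per priority level, so picking the next
 * thread amounts to "find the highest-priority pq_list that is queued,
 * take the first thread on its pl_head".  A hypothetical lookup of the
 * list for a given priority (assuming the array is indexed directly by
 * priority, with 0 <= prio < pq_size):
 */
static inline pq_list_t *
example_pq_list_for_prio(pq_queue_t *pq, int prio)
{
	return (&pq->pq_lists[prio]);
}
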
/*
 * Each KSEG has a scheduling queue.  For now, threads that exist in their
 * own KSEG (system scope) will get a full priority queue.  In the future
 * this can be optimized for the single thread per KSEG case.
 */
struct sched_queue {
	pq_queue_t		sq_runq;
	TAILQ_HEAD(, pthread)	sq_waitq;	/* waiting in userland */
};

typedef struct kse_thr_mailbox *kse_critical_t;

struct kse_group;

#define	MAX_KSE_LOCKLEVEL	5
struct kse {
	/* -- location and order specific items for gdb -- */
	struct kcb		*k_kcb;
	struct pthread		*k_curthread;	/* current thread */
	struct kse_group	*k_kseg;	/* parent KSEG */
	struct sched_queue	*k_schedq;	/* scheduling queue */
	/* -- end of location and order specific items -- */
	TAILQ_ENTRY(kse)	k_qe;		/* KSE list link entry */
	TAILQ_ENTRY(kse)	k_kgqe;		/* KSEG's KSE list entry */
	/*
	 * Items that are only modified by the kse, or that otherwise
	 * don't need to be locked when accessed.
	 */
	struct lock		k_lock;
	struct lockuser		k_lockusers[MAX_KSE_LOCKLEVEL];
	int			k_locklevel;
	stack_t			k_stack;
	int			k_flags;
#define	KF_STARTED		0x0001	/* kernel kse created */
#define	KF_INITIALIZED		0x0002	/* initialized on 1st upcall */
#define	KF_TERMINATED		0x0004	/* kse is terminated */
#define	KF_IDLE			0x0008	/* kse is idle */
#define	KF_SWITCH		0x0010	/* thread switch in UTS */
	int			k_error;	/* syscall errno in critical */
	int			k_cpu;		/* CPU ID when bound */
	int			k_sigseqno;	/* signal buffered count */
};

#define	KSE_SET_IDLE(kse)	((kse)->k_flags |= KF_IDLE)
#define	KSE_CLEAR_IDLE(kse)	((kse)->k_flags &= ~KF_IDLE)
#define	KSE_IS_IDLE(kse)	(((kse)->k_flags & KF_IDLE) != 0)
#define	KSE_SET_SWITCH(kse)	((kse)->k_flags |= KF_SWITCH)
#define	KSE_CLEAR_SWITCH(kse)	((kse)->k_flags &= ~KF_SWITCH)
#define	KSE_IS_SWITCH(kse)	(((kse)->k_flags & KF_SWITCH) != 0)

/*
 * Each KSE group contains one or more KSEs in which threads can run.
 * At least for now, there is one scheduling queue per KSE group; KSEs
 * within the same KSE group compete for threads from the same scheduling
 * queue.  A scope system thread has one KSE in one KSE group; the group
 * does not use its scheduling queue.
 */
struct kse_group {
	TAILQ_HEAD(, kse)	kg_kseq;	/* list of KSEs in group */
	TAILQ_HEAD(, pthread)	kg_threadq;	/* list of threads in group */
	TAILQ_ENTRY(kse_group)	kg_qe;		/* link entry */
	struct sched_queue	kg_schedq;	/* scheduling queue */
	struct lock		kg_lock;
	int			kg_threadcount;	/* # of assigned threads */
	int			kg_ksecount;	/* # of assigned KSEs */
	int			kg_idle_kses;
	int			kg_flags;
#define	KGF_SINGLE_THREAD	0x0001	/* scope system kse group */
#define	KGF_SCHEDQ_INITED	0x0002	/* has an initialized schedq */
};

/*
 * Add/remove threads from a KSE's scheduling queue.
 * For now the scheduling queue is hung off the KSEG.
 */
#define	KSEG_THRQ_ADD(kseg, thr)				\
do {								\
	TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);	\
	(kseg)->kg_threadcount++;				\
} while (0)

#define	KSEG_THRQ_REMOVE(kseg, thr)				\
do {								\
	TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle);		\
	(kseg)->kg_threadcount--;				\
} while (0)


/*
 * Lock acquire and release for KSEs.
 */
#define	KSE_LOCK_ACQUIRE(kse, lck)					\
do {									\
	if ((kse)->k_locklevel < MAX_KSE_LOCKLEVEL) {			\
		(kse)->k_locklevel++;					\
		_lock_acquire((lck),					\
		    &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0);	\
	}								\
	else								\
		PANIC("Exceeded maximum lock level");			\
} while (0)

#define	KSE_LOCK_RELEASE(kse, lck)					\
do {									\
	if ((kse)->k_locklevel > 0) {					\
		_lock_release((lck),					\
		    &(kse)->k_lockusers[(kse)->k_locklevel - 1]);	\
		(kse)->k_locklevel--;					\
	}								\
} while (0)

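/*
 * Illustrative sketch (not part of the original header): k_locklevel
 * tracks nested lock acquisitions, so a KSE can hold up to
 * MAX_KSE_LOCKLEVEL locks at once, each with its own lockuser slot.
 * A hypothetical nested acquisition:
 *
 *	KSE_LOCK_ACQUIRE(curkse, &list_lock);	// k_locklevel 0 -> 1
 *	KSE_LOCK_ACQUIRE(curkse, &hash_lock);	// k_locklevel 1 -> 2
 *	...
 *	KSE_LOCK_RELEASE(curkse, &hash_lock);	// k_locklevel 2 -> 1
 *	KSE_LOCK_RELEASE(curkse, &list_lock);	// k_locklevel 1 -> 0
 *
 * Releases must come in LIFO order, since both macros index the
 * k_lockusers[] stack with the current level; "list_lock" and
 * "hash_lock" are made-up names.
 */
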
/*
 * Lock our own KSEG.
 */
#define	KSE_LOCK(curkse)		\
	KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock)
#define	KSE_UNLOCK(curkse)		\
	KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock)

/*
 * Lock a potentially different KSEG.
 */
#define	KSE_SCHED_LOCK(curkse, kseg)	\
	KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock)
#define	KSE_SCHED_UNLOCK(curkse, kseg)	\
	KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock)

/*
 * Waiting queue manipulation macros (using pqe link):
 */
#define	KSE_WAITQ_REMOVE(kse, thrd)					\
do {									\
	if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) {		\
		TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe);	\
		(thrd)->flags &= ~THR_FLAGS_IN_WAITQ;			\
	}								\
} while (0)
#define	KSE_WAITQ_INSERT(kse, thrd)	kse_waitq_insert(thrd)
#define	KSE_WAITQ_FIRST(kse)		TAILQ_FIRST(&(kse)->k_schedq->sq_waitq)

#define	KSE_WAKEUP(kse)		kse_wakeup(&(kse)->k_kcb->kcb_kmbx)

/*
 * TailQ initialization values.
 */
#define	TAILQ_INITIALIZER	{ NULL, NULL }

/*
 * Lock initialization values.
 */
#define	LCK_INITIALIZER		{ NULL, NULL, LCK_DEFAULT }

struct pthread_mutex {
	/*
	 * Lock for accesses to this structure.
	 */
	struct lock			m_lock;
	enum pthread_mutextype		m_type;
	int				m_protocol;
	TAILQ_HEAD(mutex_head, pthread)	m_queue;
	struct pthread			*m_owner;
	long				m_flags;
	int				m_count;
	int				m_refcount;

	/*
	 * Used for priority inheritance and protection.
	 *
	 *   m_prio       - For priority inheritance, the highest active
	 *                  priority (threads locking the mutex inherit
	 *                  this priority).  For priority protection, the
	 *                  ceiling priority of this mutex.
	 *   m_saved_prio - The mutex owner's inherited priority before
	 *                  taking the mutex, restored when the owner
	 *                  unlocks the mutex.
	 */
	int				m_prio;
	int				m_saved_prio;

	/*
	 * Link for list of all mutexes a thread currently owns.
	 */
	TAILQ_ENTRY(pthread_mutex)	m_qe;
};

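/*
 * Illustrative sketch (not part of the original header): the
 * m_prio/m_saved_prio bookkeeping for a priority-inheritance mutex.
 * Suppose thread A (active priority 10) owns the mutex and thread B
 * (priority 20) blocks on it:
 *
 *	m_saved_prio = A's inherited priority at lock time (say 0);
 *	m_prio       = 20;		// highest waiter's priority
 *	A's inherited priority = 20;	// A runs at max(base, inherited)
 *
 * When A unlocks, its inherited priority is restored from
 * m_saved_prio, dropping A back toward its base priority.  The numbers
 * are made up for illustration.
 */
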
/*
 * Flags for mutexes.
 */
#define	MUTEX_FLAGS_PRIVATE	0x01
#define	MUTEX_FLAGS_INITED	0x02
#define	MUTEX_FLAGS_BUSY	0x04

/*
 * Static mutex initialization values.
 */
#define PTHREAD_MUTEX_STATIC_INITIALIZER				\
	{ LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE,	\
	TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0,	\
	TAILQ_INITIALIZER }

struct pthread_mutex_attr {
	enum pthread_mutextype	m_type;
	int			m_protocol;
	int			m_ceiling;
	long			m_flags;
};

#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER				\
	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }

/*
 * Condition variable definitions.
 */
enum pthread_cond_type {
	COND_TYPE_FAST,
	COND_TYPE_MAX
};

struct pthread_cond {
	/*
	 * Lock for accesses to this structure.
	 */
	struct lock			c_lock;
	enum pthread_cond_type		c_type;
	TAILQ_HEAD(cond_head, pthread)	c_queue;
	struct pthread_mutex		*c_mutex;
	long				c_flags;
	long				c_seqno;
};

struct pthread_cond_attr {
	enum pthread_cond_type	c_type;
	long			c_flags;
};

struct pthread_barrier {
	pthread_mutex_t	b_lock;
	pthread_cond_t	b_cond;
	int		b_count;
	int		b_waiters;
	int		b_generation;
};

struct pthread_barrierattr {
	int		pshared;
};

struct pthread_spinlock {
	volatile int	s_lock;
	pthread_t	s_owner;
};

/*
 * Flags for condition variables.
 */
#define	COND_FLAGS_PRIVATE	0x01
#define	COND_FLAGS_INITED	0x02
#define	COND_FLAGS_BUSY		0x04

/*
 * Static cond initialization values.
 */
#define	PTHREAD_COND_STATIC_INITIALIZER					\
	{ LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER,		\
	NULL, NULL, 0, 0 }

/*
 * Cleanup definitions.
 */
struct pthread_cleanup {
	struct pthread_cleanup	*next;
	void			(*routine) ();
	void			*routine_arg;
	int			onstack;
};

#define	THR_CLEANUP_PUSH(td, func, arg) {		\
	struct pthread_cleanup __cup;			\
							\
	__cup.routine = func;				\
	__cup.routine_arg = arg;			\
	__cup.onstack = 1;				\
	__cup.next = (td)->cleanup;			\
	(td)->cleanup = &__cup;

#define	THR_CLEANUP_POP(td, exec)			\
	(td)->cleanup = __cup.next;			\
	if ((exec) != 0)				\
		__cup.routine(__cup.routine_arg);	\
}

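/*
 * Illustrative sketch (not part of the original header): the PUSH
 * macro opens a brace and the POP macro closes it, so the two must be
 * paired in the same function, with the cleanup record living on the
 * stack between them.  A hypothetical caller:
 *
 *	THR_CLEANUP_PUSH(curthread, unlock_fn, &some_lock);
 *	do_cancellable_work();
 *	THR_CLEANUP_POP(curthread, 0);	// 0 pops without running unlock_fn
 *
 * "unlock_fn", "some_lock", and the work function are made-up names;
 * a nonzero second argument to POP runs the handler as it pops.
 */
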
struct pthread_atfork {
	TAILQ_ENTRY(pthread_atfork) qe;
	void (*prepare)(void);
	void (*parent)(void);
	void (*child)(void);
};

struct pthread_attr {
	int	sched_policy;
	int	sched_inherit;
	int	sched_interval;
	int	prio;
	int	suspend;
#define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
#define	THR_SIGNAL_THREAD	0x200	/* This is a signal thread */
	int	flags;
	void	*arg_attr;
	void	(*cleanup_attr) ();
	void	*stackaddr_attr;
	size_t	stacksize_attr;
	size_t	guardsize_attr;
};

/*
 * Thread creation state attributes.
 */
#define	THR_CREATE_RUNNING		0
#define	THR_CREATE_SUSPENDED		1

/*
 * Miscellaneous definitions.
 */
/*
 * Revision 141822 replaced the single THR_STACK_DEFAULT (65536) with
 * per-ABI defaults:
 */
#define	THR_STACK32_DEFAULT	(1 * 1024 * 1024)
#define	THR_STACK64_DEFAULT	(2 * 1024 * 1024)

/*
 * Maximum size of initial thread's stack.  This perhaps deserves to be larger
 * than the stacks of other threads, since many applications are likely to run
 * almost entirely on this stack.
 * (Revision 141822 replaced the single THR_STACK_INITIAL (0x100000) with
 * per-ABI values.)
 */
#define	THR_STACK32_INITIAL	(2 * 1024 * 1024)
#define	THR_STACK64_INITIAL	(4 * 1024 * 1024)

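/*
 * Illustrative sketch (not part of the original header): how the
 * per-ABI defaults are meant to be consumed.  When a thread is created
 * with stacksize_attr == 0, the library substitutes the platform
 * default; the __LP64__ dispatch below is a hypothetical rendering of
 * that choice, not the library's actual selection mechanism.
 */
static inline size_t
example_default_stacksize(void)
{
#ifdef __LP64__
	return (THR_STACK64_DEFAULT);	/* 64-bit ABI: 2 MB */
#else
	return (THR_STACK32_DEFAULT);	/* 32-bit ABI: 1 MB */
#endif
}
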
/*
 * Define the different priority ranges.  All applications have thread
 * priorities constrained within 0-31.  The threads library raises the
 * priority when delivering signals in order to ensure that signal
 * delivery happens (from the POSIX spec) "as soon as possible".
 * In the future, the threads library will also be able to map specific
 * threads into real-time (cooperating) processes or kernel threads.
 * The RT and SIGNAL priorities will be used internally and added to
 * thread base priorities so that the scheduling queue can handle both
 * normal and RT priority threads with and without signal handling.
 *
 * The approach taken is that, within each class, signal delivery
 * always has priority over thread execution.
 */
#define	THR_DEFAULT_PRIORITY	15
#define	THR_MIN_PRIORITY	0
#define	THR_MAX_PRIORITY	31	/* 0x1F */
#define	THR_SIGNAL_PRIORITY	32	/* 0x20 */
#define	THR_RT_PRIORITY		64	/* 0x40 */
#define	THR_FIRST_PRIORITY	THR_MIN_PRIORITY
#define	THR_LAST_PRIORITY	\
	(THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
#define	THR_BASE_PRIORITY(prio)	((prio) & THR_MAX_PRIORITY)

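/*
 * Illustrative sketch (not part of the original header): the priority
 * classes compose by addition, and THR_BASE_PRIORITY masks the class
 * bits back off.  For a thread with base priority 15:
 *
 *	15 + THR_SIGNAL_PRIORITY == 47	// while delivering a signal
 *	THR_BASE_PRIORITY(47)    == 15	// 47 & 0x1F recovers the base
 *
 * so one scheduling queue can order normal, signal-delivering, and RT
 * threads without separate structures.
 */
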
/*
 * Clock resolution in microseconds.
 */
#define	CLOCK_RES_USEC		10000

/*
 * Time slice period in microseconds.
 */
#define	TIMESLICE_USEC		20000

/*
 * XXX - Define a thread-safe macro to get the current time of day
 *	 which is updated at regular intervals by something.
 *
 * For now, we just make the system call to get the time.
 */
#define	KSE_GET_TOD(curkse, tsp)				\
do {								\
	*tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday;		\
	if ((tsp)->tv_sec == 0)					\
		clock_gettime(CLOCK_REALTIME, tsp);		\
} while (0)

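/*
 * Illustrative sketch (not part of the original header): the common
 * pattern for arming a thread's wakeup_time from a relative timeout,
 * combining KSE_GET_TOD with TIMESPEC_ADD.  "curkse", "thr", and
 * "rel_timeout" are hypothetical locals.
 *
 *	struct timespec now;
 *
 *	KSE_GET_TOD(curkse, &now);	// mailbox time, or syscall fallback
 *	TIMESPEC_ADD(&thr->wakeup_time, &now, rel_timeout);
 */
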
struct pthread_rwlockattr {
	int		pshared;
};

struct pthread_rwlock {
	pthread_mutex_t	lock;		/* monitor lock */
	pthread_cond_t	read_signal;
	pthread_cond_t	write_signal;
	int		state;		/* 0 = idle, >0 = # of readers, -1 = writer */
	int		blocked_writers;
};

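/*
 * Illustrative sketch (not part of the original header): state encodes
 * the whole reader/writer protocol under the monitor lock.
 *
 *	rdlock:	while (state < 0) wait on read_signal;  then state++;
 *	wrlock:	while (state != 0) { blocked_writers++; wait on
 *		write_signal; blocked_writers--; }  then state = -1;
 *	unlock:	a writer sets state = 0; the last reader decrements it
 *		to 0; then signal write_signal if blocked_writers > 0,
 *		else broadcast read_signal.
 *
 * This is a paraphrase of the usual monitor pattern, not a verbatim
 * copy of the rwlock implementation.
 */
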
/*
 * Thread states.
 */
enum pthread_state {
	PS_RUNNING,
	PS_LOCKWAIT,
	PS_MUTEX_WAIT,
	PS_COND_WAIT,
	PS_SLEEP_WAIT,
	PS_SIGSUSPEND,
	PS_SIGWAIT,
	PS_JOIN,
	PS_SUSPENDED,
	PS_DEAD,
	PS_DEADLOCK,
	PS_STATE_MAX
};

struct sigwait_data {
	sigset_t	*waitset;
	siginfo_t	*siginfo;	/* used to save siginfo for sigwaitinfo() */
};

union pthread_wait_data {
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
	struct lock	*lock;
	struct sigwait_data *sigwait;
};

/*
 * Define a continuation routine that can be used to perform a
 * transfer of control:
 */
typedef void	(*thread_continuation_t) (void *);

/*
 * This stores a thread's state prior to running a signal handler.
 * It is used when a signal is delivered to a thread blocked in
 * userland.  If the signal handler returns normally, the thread's
 * state is restored from here.
 */
struct pthread_sigframe {
	int			psf_valid;
	int			psf_flags;
	int			psf_cancelflags;
	int			psf_interrupted;
	int			psf_timeout;
	int			psf_signo;
	enum pthread_state	psf_state;
	union pthread_wait_data psf_wait_data;
	struct timespec		psf_wakeup_time;
	sigset_t		psf_sigset;
	sigset_t		psf_sigmask;
	int			psf_seqno;
	thread_continuation_t	psf_continuation;
};

struct join_status {
	struct pthread	*thread;
	void		*ret;
	int		error;
};

struct pthread_specific_elem {
	const void	*data;
	int		seqno;
};

struct pthread_key {
	volatile int	allocated;
	volatile int	count;
	int		seqno;
	void		(*destructor) (void *);
};

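/*
 * Illustrative sketch (not part of the original header): the seqno
 * fields let key slots be recycled safely.  Each pthread_key carries a
 * sequence number bumped when the key is (re)allocated, and each
 * per-thread pthread_specific_elem records the seqno it was written
 * under.  A hypothetical read:
 */
static inline const void *
example_getspecific(struct pthread_specific_elem *elem,
    struct pthread_key *key)
{
	/* A stale seqno means the slot was deleted and reused. */
	if (elem->seqno != key->seqno)
		return (NULL);
	return (elem->data);
}
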
#define	MAX_THR_LOCKLEVEL	5
/*
 * Thread structure.
 */
struct pthread {
	/* Thread control block */
	struct tcb		*tcb;

	/*
	 * Magic value to help recognize a valid thread structure
	 * from an invalid one:
	 */
#define	THR_MAGIC		((u_int32_t) 0xd09ba115)
	u_int32_t		magic;
	char			*name;
	u_int64_t		uniqueid;	/* for gdb */

	/* Queue entry for list of all threads: */
	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */
	TAILQ_ENTRY(pthread)	kle;	/* link for all threads in KSE/KSEG */

	/* Queue entry for GC lists: */
	TAILQ_ENTRY(pthread)	gcle;

	/* Hash queue entry */
	LIST_ENTRY(pthread)	hle;

	/*
	 * Lock for accesses to this thread structure.
	 */
	struct lock		lock;
	struct lockuser		lockusers[MAX_THR_LOCKLEVEL];
	int			locklevel;
	kse_critical_t		critical[MAX_KSE_LOCKLEVEL];
	struct kse		*kse;
	struct kse_group	*kseg;

	/*
	 * Thread start routine, argument, stack pointer and thread
	 * attributes.
	 */
	void			*(*start_routine)(void *);
	void			*arg;
	struct pthread_attr	attr;

	int			active;		/* thread running */
	int			blocked;	/* thread blocked in kernel */
	int			need_switchout;

	/*
	 * Used for tracking delivery of signal handlers.
	 */
	siginfo_t		*siginfo;
	thread_continuation_t	sigbackout;

	/*
	 * Cancelability flags - the lower 2 bits are used by cancel
	 * definitions in pthread.h
	 */
#define	THR_AT_CANCEL_POINT	0x0004
#define	THR_CANCELLING		0x0008
#define	THR_CANCEL_NEEDED	0x0010
	int			cancelflags;

	thread_continuation_t	continuation;

	/*
	 * The thread's base and pending signal masks.  The active
	 * signal mask is stored in the thread's context (in mailbox).
	 */
	sigset_t		sigmask;
	sigset_t		sigpend;
	sigset_t		*oldsigmask;
	volatile int		check_pending;
	int			refcount;

	/* Thread state: */
	enum pthread_state	state;
	volatile int		lock_switch;

	/*
	 * Number of microseconds accumulated by this thread when
	 * time slicing is active.
	 */
	long			slice_usec;

	/*
	 * Time to wake up thread.  This is used for sleeping threads and
	 * for any operation which may time out (such as select).
	 */
	struct timespec		wakeup_time;

	/* TRUE if operation has timed out. */
	int			timeout;

	/*
	 * Error variable used instead of errno.  The function __error()
	 * returns a pointer to this.
	 */
	int			error;

	/*
	 * The joiner is the thread that is joining to this thread.  The
	 * join status keeps track of a join operation to another thread.
	 */
	struct pthread		*joiner;
	struct join_status	join_status;

	/*
	 * The current thread can belong to only one scheduling queue at
	 * a time (ready or waiting queue).  It can also belong to:
	 *
	 *   o A queue of threads waiting for a mutex
	 *   o A queue of threads waiting for a condition variable
	 *
	 * It is possible for a thread to belong to more than one of the
	 * above queues if it is handling a signal.  A thread may only
	 * enter a mutex or condition variable queue when it is not
	 * being called from a signal handler.  If a thread is a member
	 * of one of these queues when a signal handler is invoked, it
	 * must be removed from the queue before invoking the handler
	 * and then added back to the queue after return from the handler.
	 *
	 * Use pqe for the scheduling queue link (both ready and waiting),
	 * sqe for synchronization (mutex, condition variable, and join)
	 * queue links, and qe for all other links.
	 */
	TAILQ_ENTRY(pthread)	pqe;	/* priority, wait queues link */
	TAILQ_ENTRY(pthread)	sqe;	/* synchronization queue link */

	/* Wait data. */
	union pthread_wait_data data;

	/*
	 * Set to TRUE if a blocking operation was
	 * interrupted by a signal:
	 */
	int			interrupted;

	/*
	 * Set to non-zero when this thread has entered a critical
	 * region.  We allow for recursive entries into critical regions.
	 */
	int			critical_count;

	/*
	 * Set to TRUE if this thread should yield after leaving a
	 * critical region to check for signals, messages, etc.
	 */
	int			critical_yield;

	int			sflags;
#define	THR_FLAGS_IN_SYNCQ	0x0001

	/* Miscellaneous flags; only set with scheduling lock held. */
	int			flags;
#define	THR_FLAGS_PRIVATE	0x0001
#define	THR_FLAGS_IN_WAITQ	0x0002	/* in waiting queue using pqe link */
#define	THR_FLAGS_IN_RUNQ	0x0004	/* in run queue using pqe link */
#define	THR_FLAGS_EXITING	0x0008	/* thread is exiting */
#define	THR_FLAGS_SUSPENDED	0x0010	/* thread is suspended */

	/* Thread list flags; only set with thread list lock held. */
#define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
#define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
#define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
	int			tlflags;

	/*
	 * Base priority is the user-settable and retrievable priority
	 * of the thread.  It is only affected by explicit calls to
	 * set thread priority and upon thread creation via a thread
	 * attribute or default priority.
	 */
	char			base_priority;

	/*
	 * Inherited priority is the priority a thread inherits by
	 * taking a priority inheritance or protection mutex.  It
	 * is not affected by base priority changes.  Inherited
	 * priority defaults to and remains 0 until a mutex is taken
	 * that is being waited on by any other thread whose priority
	 * is non-zero.
	 */
	char			inherited_priority;

	/*
	 * Active priority is always the maximum of the thread's base
	 * priority and inherited priority.  When there is a change
	 * in either the base or inherited priority, the active
	 * priority must be recalculated.
	 */
	char			active_priority;

	/* Number of priority ceiling or protection mutexes owned. */
	int			priority_mutex_count;

	/* Number of rwlock read locks held. */
	int			rdlock_count;

	/*
	 * Queue of currently owned mutexes.
	 */
	TAILQ_HEAD(, pthread_mutex) mutexq;

	void			*ret;
	struct pthread_specific_elem *specific;
	int			specific_data_count;

	/* Alternative stack for sigaltstack() */
	stack_t			sigstk;

	/*
	 * Current locks bitmap for rtld.
	 */
	int			rtld_bits;

	/* Cleanup handler linked list */
	struct pthread_cleanup	*cleanup;
	char			*fname;	/* Ptr to source file name */
	int			lineno;	/* Source line number. */
};

/*
 * Critical regions can also be detected by looking at the thread's
 * current lock level.  Ensure these macros increment and decrement
 * the lock levels such that locks can not be held with a lock level
 * of 0.
 */
#define	THR_IN_CRITICAL(thrd)			\
	(((thrd)->locklevel > 0) ||		\
	((thrd)->critical_count > 0))

#define	THR_YIELD_CHECK(thrd)				\
do {							\
	if (!THR_IN_CRITICAL(thrd)) {			\
		if (__predict_false(_libkse_debug))	\
			_thr_debug_check_yield(thrd);	\
		if ((thrd)->critical_yield != 0)	\
			_thr_sched_switch(thrd);	\
		if ((thrd)->check_pending != 0)		\
			_thr_sig_check_pending(thrd);	\
	}						\
} while (0)

#define	THR_LOCK_ACQUIRE(thrd, lck)					\
do {									\
	if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) {			\
		THR_DEACTIVATE_LAST_LOCK(thrd);				\
		(thrd)->locklevel++;					\
		_lock_acquire((lck),					\
		    &(thrd)->lockusers[(thrd)->locklevel - 1],		\
		    (thrd)->active_priority);				\
	} else								\
		PANIC("Exceeded maximum lock level");			\
} while (0)

#define	THR_LOCK_RELEASE(thrd, lck)					\
do {									\
	if ((thrd)->locklevel > 0) {					\
		_lock_release((lck),					\
		    &(thrd)->lockusers[(thrd)->locklevel - 1]);		\
		(thrd)->locklevel--;					\
		THR_ACTIVATE_LAST_LOCK(thrd);				\
		if ((thrd)->locklevel == 0)				\
			THR_YIELD_CHECK(thrd);				\
	}								\
} while (0)

#define	THR_ACTIVATE_LAST_LOCK(thrd)					\
do {									\
	if ((thrd)->locklevel > 0)					\
		_lockuser_setactive(					\
		    &(thrd)->lockusers[(thrd)->locklevel - 1], 1);	\
} while (0)

#define	THR_DEACTIVATE_LAST_LOCK(thrd)					\
do {									\
	if ((thrd)->locklevel > 0)					\
		_lockuser_setactive(					\
		    &(thrd)->lockusers[(thrd)->locklevel - 1], 0);	\
} while (0)

/*
 * For now, threads will have their own lock separate from their
 * KSE scheduling lock.
 */
#define	THR_LOCK(thr)			THR_LOCK_ACQUIRE(thr, &(thr)->lock)
#define	THR_UNLOCK(thr)			THR_LOCK_RELEASE(thr, &(thr)->lock)
#define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)

/*
 * Priority queue manipulation macros (using pqe link).  We use
 * the thread's kseg link instead of the kse link because a thread
 * does not (currently) have a statically assigned kse.
 */
#define	THR_RUNQ_INSERT_HEAD(thrd)	\
	_pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define	THR_RUNQ_INSERT_TAIL(thrd)	\
	_pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
#define	THR_RUNQ_REMOVE(thrd)		\
	_pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)

/*
 * Macros to insert/remove threads to the all thread list and
 * the gc list.
 */
#define	THR_LIST_ADD(thrd) do {					\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
		_thr_hash_add(thrd);				\
		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_LIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
		_thr_hash_remove(thrd);				\
		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
	}							\
} while (0)
#define	THR_GCLIST_ADD(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle); \
		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
		_gc_count++;					\
	}							\
} while (0)
#define	THR_GCLIST_REMOVE(thrd) do {				\
	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
		_gc_count--;					\
	}							\
} while (0)

#define	GC_NEEDED()	(atomic_load_acq_int(&_gc_count) >= 5)

/*
 * Locking the scheduling queue for another thread uses that thread's
 * KSEG lock.
 */
#define	THR_SCHED_LOCK(curthr, thr) do {		\
	(curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \
	(curthr)->locklevel++;				\
	KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg);	\
} while (0)

#define	THR_SCHED_UNLOCK(curthr, thr) do {		\
	KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg);	\
	(curthr)->locklevel--;				\
	_kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
} while (0)

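/*
 * Illustrative sketch (not part of the original header): THR_SCHED_LOCK
 * saves the kse_critical_t in the critical[] slot indexed by the
 * pre-increment locklevel, and THR_SCHED_UNLOCK pops the same slot
 * after the post-decrement, so the pair nests like a stack.  A
 * hypothetical cross-thread state change:
 *
 *	THR_SCHED_LOCK(curthread, target);	// enter critical + KSEG lock
 *	THR_SET_STATE(target, PS_RUNNING);
 *	THR_RUNQ_INSERT_TAIL(target);
 *	THR_SCHED_UNLOCK(curthread, target);	// drop lock + leave critical
 *
 * "target" is a made-up name for another thread, whose KSEG may differ
 * from the caller's.
 */
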
/* Take the scheduling lock with the intent to call the scheduler. */
#define	THR_LOCK_SWITCH(curthr) do {			\
	(void)_kse_critical_enter();			\
	KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg);	\
} while (0)
#define	THR_UNLOCK_SWITCH(curthr) do {			\
	KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
} while (0)

#define	THR_CRITICAL_ENTER(thr)	(thr)->critical_count++
#define	THR_CRITICAL_LEAVE(thr)	do {		\
	(thr)->critical_count--;		\
	if (((thr)->critical_yield != 0) &&	\
	    ((thr)->critical_count == 0)) {	\
		(thr)->critical_yield = 0;	\
		_thr_sched_switch(thr);		\
	}					\
} while (0)

#define	THR_IS_ACTIVE(thrd)	\
	(((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)))

#define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)

#define	THR_IS_SUSPENDED(thrd)					\
	(((thrd)->state == PS_SUSPENDED) ||			\
	(((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))
#define	THR_IS_EXITING(thrd)	(((thrd)->flags & THR_FLAGS_EXITING) != 0)
#define	DBG_CAN_RUN(thrd)	(((thrd)->tcb->tcb_tmbx.tm_dflags &	\
	TMDF_SUSPEND) == 0)

extern int __isthreaded;

static inline int
_kse_isthreaded(void)
{
	return (__isthreaded != 0);
}

/*
 * Global variables for the pthread kernel.
 */

SCLASS void		*_usrstack	SCLASS_PRESET(NULL);
SCLASS struct kse	*_kse_initial	SCLASS_PRESET(NULL);
SCLASS struct pthread	*_thr_initial	SCLASS_PRESET(NULL);
/* For debugger */
SCLASS int		_libkse_debug		SCLASS_PRESET(0);
SCLASS int		_thread_activated	SCLASS_PRESET(0);
SCLASS int		_thread_scope_system	SCLASS_PRESET(0);

/* List of all threads: */
SCLASS TAILQ_HEAD(, pthread)	_thread_list
    SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));

/* List of threads needing GC: */
SCLASS TAILQ_HEAD(, pthread)	_thread_gc_list
    SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));

SCLASS int	_thread_active_threads	SCLASS_PRESET(1);

SCLASS TAILQ_HEAD(atfork_head, pthread_atfork)	_thr_atfork_list;
SCLASS pthread_mutex_t		_thr_atfork_mutex;

/* Default thread attributes: */
SCLASS struct pthread_attr _pthread_attr_default
    SCLASS_PRESET({
	SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
	THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
479
480/*
481 * Define the different priority ranges. All applications have thread
482 * priorities constrained within 0-31. The threads library raises the
483 * priority when delivering signals in order to ensure that signal
484 * delivery happens (from the POSIX spec) "as soon as possible".
485 * In the future, the threads library will also be able to map specific
486 * threads into real-time (cooperating) processes or kernel threads.
487 * The RT and SIGNAL priorities will be used internally and added to
488 * thread base priorities so that the scheduling queue can handle both
489 * normal and RT priority threads with and without signal handling.
490 *
491 * The approach taken is that, within each class, signal delivery
492 * always has priority over thread execution.
493 */
494#define THR_DEFAULT_PRIORITY 15
495#define THR_MIN_PRIORITY 0
496#define THR_MAX_PRIORITY 31 /* 0x1F */
497#define THR_SIGNAL_PRIORITY 32 /* 0x20 */
498#define THR_RT_PRIORITY 64 /* 0x40 */
499#define THR_FIRST_PRIORITY THR_MIN_PRIORITY
500#define THR_LAST_PRIORITY \
501 (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
502#define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY)
503
504/*
505 * Clock resolution in microseconds.
506 */
507#define CLOCK_RES_USEC 10000
508
509/*
510 * Time slice period in microseconds.
511 */
512#define TIMESLICE_USEC 20000
513
514/*
515 * XXX - Define a thread-safe macro to get the current time of day
516 * which is updated at regular intervals by something.
517 *
518 * For now, we just make the system call to get the time.
519 */
520#define KSE_GET_TOD(curkse, tsp) \
521do { \
522 *tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday; \
523 if ((tsp)->tv_sec == 0) \
524 clock_gettime(CLOCK_REALTIME, tsp); \
525} while (0)
526
527struct pthread_rwlockattr {
528 int pshared;
529};
530
531struct pthread_rwlock {
532 pthread_mutex_t lock; /* monitor lock */
533 pthread_cond_t read_signal;
534 pthread_cond_t write_signal;
535 int state; /* 0 = idle >0 = # of readers -1 = writer */
536 int blocked_writers;
537};
538
539/*
540 * Thread states.
541 */
542enum pthread_state {
543 PS_RUNNING,
544 PS_LOCKWAIT,
545 PS_MUTEX_WAIT,
546 PS_COND_WAIT,
547 PS_SLEEP_WAIT,
548 PS_SIGSUSPEND,
549 PS_SIGWAIT,
550 PS_JOIN,
551 PS_SUSPENDED,
552 PS_DEAD,
553 PS_DEADLOCK,
554 PS_STATE_MAX
555};
556
557struct sigwait_data {
558 sigset_t *waitset;
559 siginfo_t *siginfo; /* used to save siginfo for sigwaitinfo() */
560};
561
562union pthread_wait_data {
563 pthread_mutex_t mutex;
564 pthread_cond_t cond;
565 struct lock *lock;
566 struct sigwait_data *sigwait;
567};
568
569/*
570 * Define a continuation routine that can be used to perform a
571 * transfer of control:
572 */
573typedef void (*thread_continuation_t) (void *);
574
575/*
576 * This stores a thread's state prior to running a signal handler.
577 * It is used when a signal is delivered to a thread blocked in
578 * userland. If the signal handler returns normally, the thread's
579 * state is restored from here.
580 */
581struct pthread_sigframe {
582 int psf_valid;
583 int psf_flags;
584 int psf_cancelflags;
585 int psf_interrupted;
586 int psf_timeout;
587 int psf_signo;
588 enum pthread_state psf_state;
589 union pthread_wait_data psf_wait_data;
590 struct timespec psf_wakeup_time;
591 sigset_t psf_sigset;
592 sigset_t psf_sigmask;
593 int psf_seqno;
594 thread_continuation_t psf_continuation;
595};
596
597struct join_status {
598 struct pthread *thread;
599 void *ret;
600 int error;
601};
602
603struct pthread_specific_elem {
604 const void *data;
605 int seqno;
606};
607
608struct pthread_key {
609 volatile int allocated;
610 volatile int count;
611 int seqno;
612 void (*destructor) (void *);
613};
614
615#define MAX_THR_LOCKLEVEL 5
616/*
617 * Thread structure.
618 */
619struct pthread {
620 /* Thread control block */
621 struct tcb *tcb;
622
623 /*
624 * Magic value to help recognize a valid thread structure
625 * from an invalid one:
626 */
627#define THR_MAGIC ((u_int32_t) 0xd09ba115)
628 u_int32_t magic;
629 char *name;
630 u_int64_t uniqueid; /* for gdb */
631
632 /* Queue entry for list of all threads: */
633 TAILQ_ENTRY(pthread) tle; /* link for all threads in process */
634 TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */
635
636 /* Queue entry for GC lists: */
637 TAILQ_ENTRY(pthread) gcle;
638
639 /* Hash queue entry */
640 LIST_ENTRY(pthread) hle;
641
642 /*
643 * Lock for accesses to this thread structure.
644 */
645 struct lock lock;
646 struct lockuser lockusers[MAX_THR_LOCKLEVEL];
647 int locklevel;
648 kse_critical_t critical[MAX_KSE_LOCKLEVEL];
649 struct kse *kse;
650 struct kse_group *kseg;
651
652 /*
653 * Thread start routine, argument, stack pointer and thread
654 * attributes.
655 */
656 void *(*start_routine)(void *);
657 void *arg;
658 struct pthread_attr attr;
659
660 int active; /* thread running */
661 int blocked; /* thread blocked in kernel */
662 int need_switchout;
663
664 /*
665 * Used for tracking delivery of signal handlers.
666 */
667 siginfo_t *siginfo;
668 thread_continuation_t sigbackout;
669
670 /*
671 * Cancelability flags - the lower 2 bits are used by cancel
672 * definitions in pthread.h
673 */
674#define THR_AT_CANCEL_POINT 0x0004
675#define THR_CANCELLING 0x0008
676#define THR_CANCEL_NEEDED 0x0010
677 int cancelflags;
678
679 thread_continuation_t continuation;
680
681 /*
682 * The thread's base and pending signal masks. The active
683 * signal mask is stored in the thread's context (in mailbox).
684 */
685 sigset_t sigmask;
686 sigset_t sigpend;
687 sigset_t *oldsigmask;
688 volatile int check_pending;
689 int refcount;
690
691 /* Thread state: */
692 enum pthread_state state;
693 volatile int lock_switch;
694
695 /*
696 * Number of microseconds accumulated by this thread when
697 * time slicing is active.
698 */
699 long slice_usec;
700
701 /*
702 * Time to wake up thread. This is used for sleeping threads and
703 * for any operation which may time out (such as select).
704 */
705 struct timespec wakeup_time;
706
707 /* TRUE if operation has timed out. */
708 int timeout;
709
710 /*
711 * Error variable used instead of errno. The function __error()
712 * returns a pointer to this.
713 */
714 int error;
715
716 /*
717 * The joiner is the thread that is joining to this thread. The
718 * join status keeps track of a join operation to another thread.
719 */
720 struct pthread *joiner;
721 struct join_status join_status;
722
723 /*
724 * The current thread can belong to only one scheduling queue at
725 * a time (ready or waiting queue). It can also belong to:
726 *
727 * o A queue of threads waiting for a mutex
728 * o A queue of threads waiting for a condition variable
729 *
730 * It is possible for a thread to belong to more than one of the
731 * above queues if it is handling a signal. A thread may only
732 * enter a mutex or condition variable queue when it is not
733 * being called from a signal handler. If a thread is a member
734 * of one of these queues when a signal handler is invoked, it
735 * must be removed from the queue before invoking the handler
736 * and then added back to the queue after return from the handler.
737 *
738 * Use pqe for the scheduling queue link (both ready and waiting),
739 * sqe for synchronization (mutex, condition variable, and join)
740 * queue links, and qe for all other links.
741 */
742 TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */
743 TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */
744
745 /* Wait data. */
746 union pthread_wait_data data;
747
748 /*
749 * Set to TRUE if a blocking operation was
750 * interrupted by a signal:
751 */
752 int interrupted;
753
754 /*
755 * Set to non-zero when this thread has entered a critical
756 * region. We allow for recursive entries into critical regions.
757 */
758 int critical_count;
759
760 /*
761 * Set to TRUE if this thread should yield after leaving a
762 * critical region to check for signals, messages, etc.
763 */
764 int critical_yield;
765
766 int sflags;
767#define THR_FLAGS_IN_SYNCQ 0x0001
768
769 /* Miscellaneous flags; only set with scheduling lock held. */
770 int flags;
771#define THR_FLAGS_PRIVATE 0x0001
772#define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */
773#define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */
774#define THR_FLAGS_EXITING 0x0008 /* thread is exiting */
775#define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */
776
777 /* Thread list flags; only set with thread list lock held. */
778#define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */
779#define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */
780#define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */
781 int tlflags;
782
783 /*
784 * Base priority is the user setable and retrievable priority
785 * of the thread. It is only affected by explicit calls to
786 * set thread priority and upon thread creation via a thread
787 * attribute or default priority.
788 */
789 char base_priority;
790
791 /*
792 * Inherited priority is the priority a thread inherits by
793 * taking a priority inheritence or protection mutex. It
794 * is not affected by base priority changes. Inherited
795 * priority defaults to and remains 0 until a mutex is taken
796 * that is being waited on by any other thread whose priority
797 * is non-zero.
798 */
799 char inherited_priority;
800
801 /*
802 * Active priority is always the maximum of the threads base
803 * priority and inherited priority. When there is a change
804 * in either the base or inherited priority, the active
805 * priority must be recalculated.
806 */
807 char active_priority;
808
809 /* Number of priority ceiling or protection mutexes owned. */
810 int priority_mutex_count;
811
812 /* Number rwlocks rdlocks held. */
813 int rdlock_count;
814
815 /*
816 * Queue of currently owned mutexes.
817 */
818 TAILQ_HEAD(, pthread_mutex) mutexq;
819
820 void *ret;
821 struct pthread_specific_elem *specific;
822 int specific_data_count;
823
824 /* Alternative stack for sigaltstack() */
825 stack_t sigstk;
826
827 /*
828 * Current locks bitmap for rtld.
829 */
830 int rtld_bits;
831
832 /* Cleanup handlers Link List */
833 struct pthread_cleanup *cleanup;
834 char *fname; /* Ptr to source file name */
835 int lineno; /* Source line number. */
836};
837
838/*
839 * Critical regions can also be detected by looking at the threads
840 * current lock level. Ensure these macros increment and decrement
841 * the lock levels such that locks can not be held with a lock level
842 * of 0.
843 */
844#define THR_IN_CRITICAL(thrd) \
845 (((thrd)->locklevel > 0) || \
846 ((thrd)->critical_count > 0))
847
848#define THR_YIELD_CHECK(thrd) \
849do { \
850 if (!THR_IN_CRITICAL(thrd)) { \
851 if (__predict_false(_libkse_debug)) \
852 _thr_debug_check_yield(thrd); \
853 if ((thrd)->critical_yield != 0) \
854 _thr_sched_switch(thrd); \
855 if ((thrd)->check_pending != 0) \
856 _thr_sig_check_pending(thrd); \
857 } \
858} while (0)
859
860#define THR_LOCK_ACQUIRE(thrd, lck) \
861do { \
862 if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) { \
863 THR_DEACTIVATE_LAST_LOCK(thrd); \
864 (thrd)->locklevel++; \
865 _lock_acquire((lck), \
866 &(thrd)->lockusers[(thrd)->locklevel - 1], \
867 (thrd)->active_priority); \
868 } else \
869 PANIC("Exceeded maximum lock level"); \
870} while (0)
871
872#define THR_LOCK_RELEASE(thrd, lck) \
873do { \
874 if ((thrd)->locklevel > 0) { \
875 _lock_release((lck), \
876 &(thrd)->lockusers[(thrd)->locklevel - 1]); \
877 (thrd)->locklevel--; \
878 THR_ACTIVATE_LAST_LOCK(thrd); \
879 if ((thrd)->locklevel == 0) \
880 THR_YIELD_CHECK(thrd); \
881 } \
882} while (0)
883
884#define THR_ACTIVATE_LAST_LOCK(thrd) \
885do { \
886 if ((thrd)->locklevel > 0) \
887 _lockuser_setactive( \
888 &(thrd)->lockusers[(thrd)->locklevel - 1], 1); \
889} while (0)
890
891#define THR_DEACTIVATE_LAST_LOCK(thrd) \
892do { \
893 if ((thrd)->locklevel > 0) \
894 _lockuser_setactive( \
895 &(thrd)->lockusers[(thrd)->locklevel - 1], 0); \
896} while (0)
897
898/*
899 * For now, threads will have their own lock separate from their
900 * KSE scheduling lock.
901 */
902#define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock)
903#define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock)
904#define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
905#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)
906
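/*
 * Editorial usage sketch (not in the original header): bracketing an
 * update of a thread's own state with its private lock.  The helper
 * and the flag chosen are illustrative only; the prototypes are
 * repeated from the declarations further below so the sketch stands
 * alone at this point in the file.
 */
void	_thr_debug_check_yield(struct pthread *);
void	_thr_sched_switch(struct pthread *);
void	_thr_sig_check_pending(struct pthread *);

static inline void
_thr_example_mark_suspended(struct pthread *curthread)
{
	THR_LOCK(curthread);			/* enters a critical region */
	curthread->flags |= THR_FLAGS_SUSPENDED;
	THR_UNLOCK(curthread);			/* may yield at locklevel 0 */
}
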
907/*
908 * Priority queue manipulation macros (using pqe link). We use
909 * the thread's kseg link instead of the kse link because a thread
910 * does not (currently) have a statically assigned kse.
911 */
912#define THR_RUNQ_INSERT_HEAD(thrd) \
913 _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
914#define THR_RUNQ_INSERT_TAIL(thrd) \
915 _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
916#define THR_RUNQ_REMOVE(thrd) \
917 _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
918
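/*
 * Editorial sketch: a typical path that makes a thread runnable.  The
 * caller is assumed to hold the thread's KSEG scheduling lock; the
 * helper name is hypothetical and the prototype is repeated from the
 * declarations below.
 */
void	_pq_insert_tail(struct pq_queue *pq, struct pthread *);

static inline void
_thr_example_enqueue_runnable(struct pthread *thrd)
{
	thrd->state = PS_RUNNING;
	/* Round-robin wakeups go to the tail of the priority run queue. */
	THR_RUNQ_INSERT_TAIL(thrd);
}
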
919/*
920 * Macros to insert threads into, and remove them from, the
921 * all-threads list and the GC list.
922 */
923#define THR_LIST_ADD(thrd) do { \
924 if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \
925 TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \
926 _thr_hash_add(thrd); \
927 (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \
928 } \
929} while (0)
930#define THR_LIST_REMOVE(thrd) do { \
931 if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \
932 TAILQ_REMOVE(&_thread_list, thrd, tle); \
933 _thr_hash_remove(thrd); \
934 (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \
935 } \
936} while (0)
937#define THR_GCLIST_ADD(thrd) do { \
938 if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \
939 TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
940 (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \
941 _gc_count++; \
942 } \
943} while (0)
944#define THR_GCLIST_REMOVE(thrd) do { \
945 if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \
946 TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
947 (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \
948 _gc_count--; \
949 } \
950} while (0)
951
952#define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5)
953
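/*
 * Editorial sketch: retiring a dead thread.  Both macros require the
 * thread list lock (see the tlflags comment above); GC_NEEDED() is
 * polled elsewhere to decide when _thr_gc() should run.  The helper
 * is hypothetical; the prototype is repeated from below.
 */
void	_thr_hash_remove(struct pthread *);

static inline void
_thr_example_retire_thread(struct pthread *dead)
{
	THR_LIST_REMOVE(dead);		/* drop from the all-threads list */
	THR_GCLIST_ADD(dead);		/* queue for the garbage collector */
}
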
954/*
955 * Locking the scheduling queue for another thread uses that thread's
956 * KSEG lock.
957 */
958#define THR_SCHED_LOCK(curthr, thr) do { \
959 (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \
960 (curthr)->locklevel++; \
961 KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \
962} while (0)
963
964#define THR_SCHED_UNLOCK(curthr, thr) do { \
965 KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \
966 (curthr)->locklevel--; \
967 _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
968} while (0)
969
970/* Take the scheduling lock with the intent to call the scheduler. */
971#define THR_LOCK_SWITCH(curthr) do { \
972 (void)_kse_critical_enter(); \
973 KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg); \
974} while (0)
975#define THR_UNLOCK_SWITCH(curthr) do { \
976 KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
977} while (0)
978
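/*
 * Editorial sketch: waking another thread while holding its KSEG's
 * scheduling lock.  This mirrors what a helper such as
 * _thr_setrunnable() is expected to do; prototypes are repeated from
 * the declarations below.
 */
kse_critical_t _kse_critical_enter(void);
void	_kse_critical_leave(kse_critical_t);
struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *);

static inline void
_thr_example_wake_thread(struct pthread *curthr, struct pthread *thr)
{
	THR_SCHED_LOCK(curthr, thr);
	/* A real caller would also kick the returned KSE mailbox. */
	(void)_thr_setrunnable_unlocked(thr);
	THR_SCHED_UNLOCK(curthr, thr);
}
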
979#define THR_CRITICAL_ENTER(thr) (thr)->critical_count++
980#define THR_CRITICAL_LEAVE(thr) do { \
981 (thr)->critical_count--; \
982 if (((thr)->critical_yield != 0) && \
983 ((thr)->critical_count == 0)) { \
984 (thr)->critical_yield = 0; \
985 _thr_sched_switch(thr); \
986 } \
987} while (0)
988
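/*
 * Editorial sketch: a region that must not be interrupted by the
 * signal code.  ENTER/LEAVE nest because they only count; the final
 * LEAVE performs any context switch deferred while inside.
 */
static inline void
_thr_example_no_interrupts(struct pthread *thr)
{
	THR_CRITICAL_ENTER(thr);
	/* ... touch state that the signal handlers also use ... */
	THR_CRITICAL_LEAVE(thr);
}
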
989#define THR_IS_ACTIVE(thrd) \
990 (((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)))
991
992#define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
993
994#define THR_IS_SUSPENDED(thrd) \
995 (((thrd)->state == PS_SUSPENDED) || \
996 (((thrd)->flags & THR_FLAGS_SUSPENDED) != 0))
997#define THR_IS_EXITING(thrd) (((thrd)->flags & THR_FLAGS_EXITING) != 0)
998#define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \
999 TMDF_SUSPEND) == 0)
1000
1001extern int __isthreaded;
1002
1003static inline int
1004_kse_isthreaded(void)
1005{
1006 return (__isthreaded != 0);
1007}
1008
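/*
 * Editorial sketch: latching _kse_isthreaded() to skip locking while
 * the process is still single-threaded.  Illustrative only.
 */
static inline void
_thr_example_maybe_lock(struct pthread *curthread)
{
	int threaded = _kse_isthreaded();	/* latch once */

	if (threaded)
		THR_LOCK(curthread);
	/* ... work on state shared with other threads ... */
	if (threaded)
		THR_UNLOCK(curthread);
}
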
1009/*
1010 * Global variables for the pthread kernel.
1011 */
1012
1013SCLASS void *_usrstack SCLASS_PRESET(NULL);
1014SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL);
1015SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL);
1016/* For debugger */
1017SCLASS int _libkse_debug SCLASS_PRESET(0);
1018SCLASS int _thread_activated SCLASS_PRESET(0);
1019SCLASS int _thread_scope_system SCLASS_PRESET(0);
1020
1021/* List of all threads: */
1022SCLASS TAILQ_HEAD(, pthread) _thread_list
1023 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list));
1024
1025/* List of threads needing GC: */
1026SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
1027 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
1028
1029SCLASS int _thread_active_threads SCLASS_PRESET(1);
1030
1031SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
1032SCLASS pthread_mutex_t _thr_atfork_mutex;
1033
1034/* Default thread attributes: */
1035SCLASS struct pthread_attr _pthread_attr_default
1036 SCLASS_PRESET({
1037 SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
1038 THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
1039 NULL, NULL, /* stacksize */0, /* guardsize */0
1040 });
1041
1042/* Default mutex attributes: */
1043SCLASS struct pthread_mutex_attr _pthread_mutexattr_default
1044 SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 });
1045
1046/* Default condition variable attributes: */
1047SCLASS struct pthread_cond_attr _pthread_condattr_default
1048 SCLASS_PRESET({COND_TYPE_FAST, 0});
1049
1050/* Clock resolution in usec. */
1051SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC);
1052
1053/* Array of signal actions for this process: */
1054SCLASS struct sigaction _thread_sigact[_SIG_MAXSIG];
1055
1056/*
1057 * Lock protecting the signal actions above and the process
1058 * signal mask and pending signal sets.
1059 */
1060SCLASS struct lock _thread_signal_lock;
1061
1062/* Pending signals and mask for this process: */
1063SCLASS sigset_t _thr_proc_sigpending;
1064SCLASS siginfo_t _thr_proc_siginfo[_SIG_MAXSIG];
1065
1066SCLASS pid_t _thr_pid SCLASS_PRESET(0);
1067
1068/* Garbage collector lock. */
1069SCLASS struct lock _gc_lock;
1070SCLASS int _gc_check SCLASS_PRESET(0);
1071SCLASS int _gc_count SCLASS_PRESET(0);
1072
1073SCLASS struct lock _mutex_static_lock;
1074SCLASS struct lock _rwlock_static_lock;
1075SCLASS struct lock _keytable_lock;
1076SCLASS struct lock _thread_list_lock;
1077SCLASS int _thr_guard_default;
1078SCLASS int _thr_stack_default;
1079SCLASS int _thr_stack_initial;
1080SCLASS int _thr_page_size;
1081SCLASS pthread_t _thr_sig_daemon;
1082SCLASS int _thr_debug_flags SCLASS_PRESET(0);
1083
1084/* Undefine the storage class and preset specifiers: */
1085#undef SCLASS
1086#undef SCLASS_PRESET
1087
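/*
 * Editorial note: SCLASS/SCLASS_PRESET implement the usual trick of
 * defining the globals above in exactly one translation unit and
 * declaring them extern everywhere else.  A sketch of the expected
 * setup in the including file (the guard macro name is an
 * assumption):
 *
 *	#ifdef GLOBAL_PTHREAD_PRIVATE
 *	#define SCLASS
 *	#define SCLASS_PRESET(x)	= x
 *	#else
 *	#define SCLASS			extern
 *	#define SCLASS_PRESET(x)
 *	#endif
 */
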
1088
1089/*
1090 * Function prototype definitions.
1091 */
1092__BEGIN_DECLS
1093int _cond_reinit(pthread_cond_t *);
1094struct kse *_kse_alloc(struct pthread *, int sys_scope);
1095kse_critical_t _kse_critical_enter(void);
1096void _kse_critical_leave(kse_critical_t);
1097int _kse_in_critical(void);
1098void _kse_free(struct pthread *, struct kse *);
1099void _kse_init(void);
1100struct kse_group *_kseg_alloc(struct pthread *);
1101void _kse_lock_wait(struct lock *, struct lockuser *lu);
1102void _kse_lock_wakeup(struct lock *, struct lockuser *lu);
1103void _kse_single_thread(struct pthread *);
1104int _kse_setthreaded(int);
1105void _kseg_free(struct kse_group *);
1106int _mutex_cv_lock(pthread_mutex_t *);
1107int _mutex_cv_unlock(pthread_mutex_t *);
1108void _mutex_notify_priochange(struct pthread *, struct pthread *, int);
1109int _mutex_reinit(struct pthread_mutex *);
1110void _mutex_unlock_private(struct pthread *);
1111void _libpthread_init(struct pthread *);
1112int _pq_alloc(struct pq_queue *, int, int);
1113void _pq_free(struct pq_queue *);
1114int _pq_init(struct pq_queue *);
1115void _pq_remove(struct pq_queue *pq, struct pthread *);
1116void _pq_insert_head(struct pq_queue *pq, struct pthread *);
1117void _pq_insert_tail(struct pq_queue *pq, struct pthread *);
1118struct pthread *_pq_first(struct pq_queue *pq);
1119struct pthread *_pq_first_debug(struct pq_queue *pq);
1120void *_pthread_getspecific(pthread_key_t);
1121int _pthread_key_create(pthread_key_t *, void (*) (void *));
1122int _pthread_key_delete(pthread_key_t);
1123int _pthread_mutex_destroy(pthread_mutex_t *);
1124int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *);
1125int _pthread_mutex_lock(pthread_mutex_t *);
1126int _pthread_mutex_trylock(pthread_mutex_t *);
1127int _pthread_mutex_unlock(pthread_mutex_t *);
1128int _pthread_mutexattr_init(pthread_mutexattr_t *);
1129int _pthread_mutexattr_destroy(pthread_mutexattr_t *);
1130int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
1131int _pthread_once(pthread_once_t *, void (*) (void));
1132int _pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *);
1133int _pthread_rwlock_destroy(pthread_rwlock_t *);
1134struct pthread *_pthread_self(void);
1135int _pthread_setspecific(pthread_key_t, const void *);
1136void _pthread_yield(void);
1137void _pthread_cleanup_push(void (*routine) (void *), void *routine_arg);
1138void _pthread_cleanup_pop(int execute);
1139struct pthread *_thr_alloc(struct pthread *);
1140void _thr_exit(char *, int, char *);
1141void _thr_exit_cleanup(void);
1142void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
1143void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
1144void _thr_mutex_reinit(pthread_mutex_t *);
1145int _thr_ref_add(struct pthread *, struct pthread *, int);
1146void _thr_ref_delete(struct pthread *, struct pthread *);
1147void _thr_rtld_init(void);
1148void _thr_rtld_fini(void);
1149int _thr_schedule_add(struct pthread *, struct pthread *);
1150void _thr_schedule_remove(struct pthread *, struct pthread *);
1151void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
1152struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread);
1153struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *);
1154void _thr_sig_dispatch(struct kse *, int, siginfo_t *);
1155int _thr_stack_alloc(struct pthread_attr *);
1156void _thr_stack_free(struct pthread_attr *);
1158void _thr_free(struct pthread *, struct pthread *);
1159void _thr_gc(struct pthread *);
1160void _thr_panic_exit(char *, int, char *);
1161void _thread_cleanupspecific(void);
1162void _thread_dump_info(void);
1163void _thread_printf(int, const char *, ...);
1164void _thr_sched_switch(struct pthread *);
1165void _thr_sched_switch_unlocked(struct pthread *);
1166void _thr_set_timeout(const struct timespec *);
1167void _thr_seterrno(struct pthread *, int);
1168void _thr_sig_handler(int, siginfo_t *, ucontext_t *);
1169void _thr_sig_check_pending(struct pthread *);
1170void _thr_sig_rundown(struct pthread *, ucontext_t *);
1171void _thr_sig_send(struct pthread *pthread, int sig);
1172void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf);
1173void _thr_spinlock_init(void);
1174void _thr_cancel_enter(struct pthread *);
1175void _thr_cancel_leave(struct pthread *, int);
1176int _thr_setconcurrency(int new_level);
1177int _thr_setmaxconcurrency(void);
1178void _thr_critical_enter(struct pthread *);
1179void _thr_critical_leave(struct pthread *);
1180int _thr_start_sig_daemon(void);
1181int _thr_getprocsig(int sig, siginfo_t *siginfo);
1182int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo);
1183void _thr_signal_init(void);
1184void _thr_signal_deinit(void);
1185void _thr_hash_add(struct pthread *);
1186void _thr_hash_remove(struct pthread *);
1187struct pthread *_thr_hash_find(struct pthread *);
1188void _thr_finish_cancellation(void *arg);
1189int _thr_sigonstack(void *sp);
1190void _thr_debug_check_yield(struct pthread *);
1191
1192/*
1193 * Aliases for _pthread functions. They should be called instead
1194 * of the originals if PLT relocation is unwanted at runtime.
1195 */
1196int _thr_cond_broadcast(pthread_cond_t *);
1197int _thr_cond_signal(pthread_cond_t *);
1198int _thr_cond_wait(pthread_cond_t *, pthread_mutex_t *);
1199int _thr_mutex_lock(pthread_mutex_t *);
1200int _thr_mutex_unlock(pthread_mutex_t *);
1201int _thr_rwlock_rdlock(pthread_rwlock_t *);
1202int _thr_rwlock_wrlock(pthread_rwlock_t *);
1203int _thr_rwlock_unlock(pthread_rwlock_t *);
1204
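/*
 * Editorial note: each block below is guarded by the include guard of
 * the corresponding system header, so a raw syscall prototype is
 * visible only if the caller already included that header, e.g.:
 *
 *	#include <signal.h>		(defines _SIGNAL_H_)
 *	#include "thr_private.h"	(now declares __sys_sigprocmask())
 */
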
1205/* #include <sys/aio.h> */
1206#ifdef _SYS_AIO_H_
1207int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
1208#endif
1209
1210/* #include <fcntl.h> */
1211#ifdef _SYS_FCNTL_H_
1212int __sys_fcntl(int, int, ...);
1213int __sys_open(const char *, int, ...);
1214#endif
1215
1216/* #include <sys/ioctl.h> */
1217#ifdef _SYS_IOCTL_H_
1218int __sys_ioctl(int, unsigned long, ...);
1219#endif
1220
1221/* #include <sched.h> */
1222#ifdef _SCHED_H_
1223int __sys_sched_yield(void);
1224#endif
1225
1226/* #include <signal.h> */
1227#ifdef _SIGNAL_H_
1228int __sys_kill(pid_t, int);
1229int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
1230int __sys_sigpending(sigset_t *);
1231int __sys_sigprocmask(int, const sigset_t *, sigset_t *);
1232int __sys_sigsuspend(const sigset_t *);
1233int __sys_sigreturn(ucontext_t *);
1234int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
1235#endif
1236
1237/* #include <sys/socket.h> */
1238#ifdef _SYS_SOCKET_H_
1239int __sys_accept(int, struct sockaddr *, socklen_t *);
1240int __sys_connect(int, const struct sockaddr *, socklen_t);
1241int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
1242 off_t *, int);
1243#endif
1244
1245/* #include <sys/uio.h> */
1246#ifdef _SYS_UIO_H_
1247ssize_t __sys_readv(int, const struct iovec *, int);
1248ssize_t __sys_writev(int, const struct iovec *, int);
1249#endif
1250
1251/* #include <time.h> */
1252#ifdef _TIME_H_
1253int __sys_nanosleep(const struct timespec *, struct timespec *);
1254#endif
1255
1256/* #include <unistd.h> */
1257#ifdef _UNISTD_H_
1258int __sys_close(int);
1259int __sys_execve(const char *, char * const *, char * const *);
1260int __sys_fork(void);
1261int __sys_fsync(int);
1262pid_t __sys_getpid(void);
1263int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
1264ssize_t __sys_read(int, void *, size_t);
1265ssize_t __sys_write(int, const void *, size_t);
1266void __sys_exit(int);
1267int __sys_sigwait(const sigset_t *, int *);
1268int __sys_sigtimedwait(sigset_t *, siginfo_t *, struct timespec *);
1269#endif
1270
1271/* #include <poll.h> */
1272#ifdef _SYS_POLL_H_
1273int __sys_poll(struct pollfd *, unsigned, int);
1274#endif
1275
1276/* #include <sys/mman.h> */
1277#ifdef _SYS_MMAN_H_
1278int __sys_msync(void *, size_t, int);
1279#endif
1280
1281#endif /* !_THR_PRIVATE_H */