1/* 2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by John Birrell. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * Private thread definitions for the uthread kernel. 33 *
 * $FreeBSD: head/lib/libkse/thread/thr_private.h 141822 2005-02-13 18:38:06Z deischen $
35 */ 36 37#ifndef _THR_PRIVATE_H 38#define _THR_PRIVATE_H 39 40/* 41 * Include files. 42 */ 43#include <setjmp.h> 44#include <signal.h> 45#include <stdio.h> 46#include <sys/queue.h> 47#include <sys/types.h> 48#include <sys/time.h> 49#include <sys/cdefs.h> 50#include <sys/kse.h> 51#include <sched.h> 52#include <ucontext.h> 53#include <unistd.h> 54#include <pthread.h> 55#include <pthread_np.h> 56 57#ifndef LIBTHREAD_DB 58#include "lock.h" 59#include "pthread_md.h" 60#endif 61 62/* 63 * Evaluate the storage class specifier. 64 */ 65#ifdef GLOBAL_PTHREAD_PRIVATE 66#define SCLASS 67#define SCLASS_PRESET(x...) = x 68#else 69#define SCLASS extern 70#define SCLASS_PRESET(x...) 71#endif 72 73/* 74 * Kernel fatal error handler macro. 75 */ 76#define PANIC(string) _thr_exit(__FILE__,__LINE__,string) 77 78 79/* Output debug messages like this: */ 80#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) 81#define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args) 82 83#define DBG_MUTEX 0x0001 84#define DBG_SIG 0x0002 85 86#ifdef _PTHREADS_INVARIANTS 87#define THR_ASSERT(cond, msg) do { \ 88 if (!(cond)) \ 89 PANIC(msg); \ 90} while (0) 91#else 92#define THR_ASSERT(cond, msg) 93#endif 94 95/* 96 * State change macro without scheduling queue change: 97 */ 98#define THR_SET_STATE(thrd, newstate) do { \ 99 (thrd)->state = newstate; \ 100 (thrd)->fname = __FILE__; \ 101 (thrd)->lineno = __LINE__; \ 102} while (0) 103 104 105#define TIMESPEC_ADD(dst, src, val) \ 106 do { \ 107 (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ 108 (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ 109 if ((dst)->tv_nsec > 1000000000) { \ 110 (dst)->tv_sec++; \ 111 (dst)->tv_nsec -= 1000000000; \ 112 } \ 113 } while (0) 114 115#define TIMESPEC_SUB(dst, src, val) \ 116 do { \ 117 (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ 118 (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ 119 if ((dst)->tv_nsec < 0) { \ 120 (dst)->tv_sec--; \ 121 (dst)->tv_nsec += 1000000000; \ 122 } \ 
123 } while (0) 124 125/* 126 * Priority queues. 127 * 128 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 129 */ 130typedef struct pq_list { 131 TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ 132 TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ 133 int pl_prio; /* the priority of this list */ 134 int pl_queued; /* is this in the priority queue */ 135} pq_list_t; 136 137typedef struct pq_queue { 138 TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ 139 pq_list_t *pq_lists; /* array of all priority lists */ 140 int pq_size; /* number of priority lists */ 141#define PQF_ACTIVE 0x0001 142 int pq_flags; 143 int pq_threads; 144} pq_queue_t; 145 146/* 147 * Each KSEG has a scheduling queue. For now, threads that exist in their 148 * own KSEG (system scope) will get a full priority queue. In the future 149 * this can be optimized for the single thread per KSEG case. 150 */ 151struct sched_queue { 152 pq_queue_t sq_runq; 153 TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */ 154}; 155 156typedef struct kse_thr_mailbox *kse_critical_t; 157 158struct kse_group; 159 160#define MAX_KSE_LOCKLEVEL 5 161struct kse { 162 /* -- location and order specific items for gdb -- */ 163 struct kcb *k_kcb; 164 struct pthread *k_curthread; /* current thread */ 165 struct kse_group *k_kseg; /* parent KSEG */ 166 struct sched_queue *k_schedq; /* scheduling queue */ 167 /* -- end of location and order specific items -- */ 168 TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */ 169 TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */ 170 /* 171 * Items that are only modified by the kse, or that otherwise 172 * don't need to be locked when accessed 173 */ 174 struct lock k_lock; 175 struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL]; 176 int k_locklevel; 177 stack_t k_stack; 178 int k_flags; 179#define KF_STARTED 0x0001 /* kernel kse created */ 180#define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ 
181#define KF_TERMINATED 0x0004 /* kse is terminated */ 182#define KF_IDLE 0x0008 /* kse is idle */ 183#define KF_SWITCH 0x0010 /* thread switch in UTS */ 184 int k_error; /* syscall errno in critical */ 185 int k_cpu; /* CPU ID when bound */ 186 int k_sigseqno; /* signal buffered count */ 187}; 188 189#define KSE_SET_IDLE(kse) ((kse)->k_flags |= KF_IDLE) 190#define KSE_CLEAR_IDLE(kse) ((kse)->k_flags &= ~KF_IDLE) 191#define KSE_IS_IDLE(kse) (((kse)->k_flags & KF_IDLE) != 0) 192#define KSE_SET_SWITCH(kse) ((kse)->k_flags |= KF_SWITCH) 193#define KSE_CLEAR_SWITCH(kse) ((kse)->k_flags &= ~KF_SWITCH) 194#define KSE_IS_SWITCH(kse) (((kse)->k_flags & KF_SWITCH) != 0) 195 196/* 197 * Each KSE group contains one or more KSEs in which threads can run. 198 * At least for now, there is one scheduling queue per KSE group; KSEs 199 * within the same KSE group compete for threads from the same scheduling 200 * queue. A scope system thread has one KSE in one KSE group; the group 201 * does not use its scheduling queue. 202 */ 203struct kse_group { 204 TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */ 205 TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */ 206 TAILQ_ENTRY(kse_group) kg_qe; /* link entry */ 207 struct sched_queue kg_schedq; /* scheduling queue */ 208 struct lock kg_lock; 209 int kg_threadcount; /* # of assigned threads */ 210 int kg_ksecount; /* # of assigned KSEs */ 211 int kg_idle_kses; 212 int kg_flags; 213#define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */ 214#define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */ 215}; 216 217/* 218 * Add/remove threads from a KSE's scheduling queue. 219 * For now the scheduling queue is hung off the KSEG. 
220 */ 221#define KSEG_THRQ_ADD(kseg, thr) \ 222do { \ 223 TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\ 224 (kseg)->kg_threadcount++; \ 225} while (0) 226 227#define KSEG_THRQ_REMOVE(kseg, thr) \ 228do { \ 229 TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \ 230 (kseg)->kg_threadcount--; \ 231} while (0) 232 233 234/* 235 * Lock acquire and release for KSEs. 236 */ 237#define KSE_LOCK_ACQUIRE(kse, lck) \ 238do { \ 239 if ((kse)->k_locklevel < MAX_KSE_LOCKLEVEL) { \ 240 (kse)->k_locklevel++; \ 241 _lock_acquire((lck), \ 242 &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \ 243 } \ 244 else \ 245 PANIC("Exceeded maximum lock level"); \ 246} while (0) 247 248#define KSE_LOCK_RELEASE(kse, lck) \ 249do { \ 250 if ((kse)->k_locklevel > 0) { \ 251 _lock_release((lck), \ 252 &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \ 253 (kse)->k_locklevel--; \ 254 } \ 255} while (0) 256 257/* 258 * Lock our own KSEG. 259 */ 260#define KSE_LOCK(curkse) \ 261 KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock) 262#define KSE_UNLOCK(curkse) \ 263 KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock) 264 265/* 266 * Lock a potentially different KSEG. 267 */ 268#define KSE_SCHED_LOCK(curkse, kseg) \ 269 KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock) 270#define KSE_SCHED_UNLOCK(curkse, kseg) \ 271 KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock) 272 273/* 274 * Waiting queue manipulation macros (using pqe link): 275 */ 276#define KSE_WAITQ_REMOVE(kse, thrd) \ 277do { \ 278 if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \ 279 TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \ 280 (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \ 281 } \ 282} while (0) 283#define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd) 284#define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq) 285 286#define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_kcb->kcb_kmbx) 287 288/* 289 * TailQ initialization values. 290 */ 291#define TAILQ_INITIALIZER { NULL, NULL } 292 293/* 294 * lock initialization values. 
295 */ 296#define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT } 297 298struct pthread_mutex { 299 /* 300 * Lock for accesses to this structure. 301 */ 302 struct lock m_lock; 303 enum pthread_mutextype m_type; 304 int m_protocol; 305 TAILQ_HEAD(mutex_head, pthread) m_queue; 306 struct pthread *m_owner; 307 long m_flags; 308 int m_count; 309 int m_refcount; 310 311 /* 312 * Used for priority inheritence and protection. 313 * 314 * m_prio - For priority inheritence, the highest active 315 * priority (threads locking the mutex inherit 316 * this priority). For priority protection, the 317 * ceiling priority of this mutex. 318 * m_saved_prio - mutex owners inherited priority before 319 * taking the mutex, restored when the owner 320 * unlocks the mutex. 321 */ 322 int m_prio; 323 int m_saved_prio; 324 325 /* 326 * Link for list of all mutexes a thread currently owns. 327 */ 328 TAILQ_ENTRY(pthread_mutex) m_qe; 329}; 330 331/* 332 * Flags for mutexes. 333 */ 334#define MUTEX_FLAGS_PRIVATE 0x01 335#define MUTEX_FLAGS_INITED 0x02 336#define MUTEX_FLAGS_BUSY 0x04 337 338/* 339 * Static mutex initialization values. 340 */ 341#define PTHREAD_MUTEX_STATIC_INITIALIZER \ 342 { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \ 343 TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \ 344 TAILQ_INITIALIZER } 345 346struct pthread_mutex_attr { 347 enum pthread_mutextype m_type; 348 int m_protocol; 349 int m_ceiling; 350 long m_flags; 351}; 352 353#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ 354 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } 355 356/* 357 * Condition variable definitions. 358 */ 359enum pthread_cond_type { 360 COND_TYPE_FAST, 361 COND_TYPE_MAX 362}; 363 364struct pthread_cond { 365 /* 366 * Lock for accesses to this structure. 
367 */ 368 struct lock c_lock; 369 enum pthread_cond_type c_type; 370 TAILQ_HEAD(cond_head, pthread) c_queue; 371 struct pthread_mutex *c_mutex; 372 long c_flags; 373 long c_seqno; 374}; 375 376struct pthread_cond_attr { 377 enum pthread_cond_type c_type; 378 long c_flags; 379}; 380 381struct pthread_barrier { 382 pthread_mutex_t b_lock; 383 pthread_cond_t b_cond; 384 int b_count; 385 int b_waiters; 386 int b_generation; 387}; 388 389struct pthread_barrierattr { 390 int pshared; 391}; 392 393struct pthread_spinlock { 394 volatile int s_lock; 395 pthread_t s_owner; 396}; 397 398/* 399 * Flags for condition variables. 400 */ 401#define COND_FLAGS_PRIVATE 0x01 402#define COND_FLAGS_INITED 0x02 403#define COND_FLAGS_BUSY 0x04 404 405/* 406 * Static cond initialization values. 407 */ 408#define PTHREAD_COND_STATIC_INITIALIZER \ 409 { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \ 410 NULL, NULL, 0, 0 } 411 412/* 413 * Cleanup definitions. 414 */ 415struct pthread_cleanup { 416 struct pthread_cleanup *next; 417 void (*routine) (); 418 void *routine_arg; 419 int onstack; 420}; 421 422#define THR_CLEANUP_PUSH(td, func, arg) { \ 423 struct pthread_cleanup __cup; \ 424 \ 425 __cup.routine = func; \ 426 __cup.routine_arg = arg; \ 427 __cup.onstack = 1; \ 428 __cup.next = (td)->cleanup; \ 429 (td)->cleanup = &__cup; 430 431#define THR_CLEANUP_POP(td, exec) \ 432 (td)->cleanup = __cup.next; \ 433 if ((exec) != 0) \ 434 __cup.routine(__cup.routine_arg); \ 435} 436 437struct pthread_atfork { 438 TAILQ_ENTRY(pthread_atfork) qe; 439 void (*prepare)(void); 440 void (*parent)(void); 441 void (*child)(void); 442}; 443 444struct pthread_attr { 445 int sched_policy; 446 int sched_inherit; 447 int sched_interval; 448 int prio; 449 int suspend; 450#define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */ 451#define THR_SIGNAL_THREAD 0x200 /* This is a signal thread */ 452 int flags; 453 void *arg_attr; 454 void (*cleanup_attr) (); 455 void *stackaddr_attr; 456 size_t 
stacksize_attr; 457 size_t guardsize_attr; 458}; 459 460/* 461 * Thread creation state attributes. 462 */ 463#define THR_CREATE_RUNNING 0 464#define THR_CREATE_SUSPENDED 1 465 466/* 467 * Miscellaneous definitions. 468 */
/*
 * Default per-thread stack size, by ABI pointer width (resolved to the
 * r141822 side of the leftover diff; the flat THR_STACK_DEFAULT is gone).
 */
#define	THR_STACK32_DEFAULT	(1 * 1024 * 1024)
#define	THR_STACK64_DEFAULT	(2 * 1024 * 1024)
471 472/* 473 * Maximum size of initial thread's stack. This perhaps deserves to be larger 474 * than the stacks of other threads, since many applications are likely to run 475 * almost entirely on this stack. 476 */
/*
 * Initial thread's stack size, by ABI pointer width (resolved to the
 * r141822 side of the leftover diff; the flat THR_STACK_INITIAL is gone).
 * Deliberately larger than other threads' stacks: many applications run
 * almost entirely on the initial thread's stack.
 */
#define	THR_STACK32_INITIAL	(2 * 1024 * 1024)
#define	THR_STACK64_INITIAL	(4 * 1024 * 1024)
479 480/* 481 * Define the different priority ranges. All applications have thread 482 * priorities constrained within 0-31. The threads library raises the 483 * priority when delivering signals in order to ensure that signal 484 * delivery happens (from the POSIX spec) "as soon as possible". 485 * In the future, the threads library will also be able to map specific 486 * threads into real-time (cooperating) processes or kernel threads. 487 * The RT and SIGNAL priorities will be used internally and added to 488 * thread base priorities so that the scheduling queue can handle both 489 * normal and RT priority threads with and without signal handling. 490 * 491 * The approach taken is that, within each class, signal delivery 492 * always has priority over thread execution. 493 */ 494#define THR_DEFAULT_PRIORITY 15 495#define THR_MIN_PRIORITY 0 496#define THR_MAX_PRIORITY 31 /* 0x1F */ 497#define THR_SIGNAL_PRIORITY 32 /* 0x20 */ 498#define THR_RT_PRIORITY 64 /* 0x40 */ 499#define THR_FIRST_PRIORITY THR_MIN_PRIORITY 500#define THR_LAST_PRIORITY \ 501 (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY) 502#define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY) 503 504/* 505 * Clock resolution in microseconds. 506 */ 507#define CLOCK_RES_USEC 10000 508 509/* 510 * Time slice period in microseconds. 511 */ 512#define TIMESLICE_USEC 20000 513 514/* 515 * XXX - Define a thread-safe macro to get the current time of day 516 * which is updated at regular intervals by something. 517 * 518 * For now, we just make the system call to get the time. 
519 */ 520#define KSE_GET_TOD(curkse, tsp) \ 521do { \ 522 *tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday; \ 523 if ((tsp)->tv_sec == 0) \ 524 clock_gettime(CLOCK_REALTIME, tsp); \ 525} while (0) 526 527struct pthread_rwlockattr { 528 int pshared; 529}; 530 531struct pthread_rwlock { 532 pthread_mutex_t lock; /* monitor lock */ 533 pthread_cond_t read_signal; 534 pthread_cond_t write_signal; 535 int state; /* 0 = idle >0 = # of readers -1 = writer */ 536 int blocked_writers; 537}; 538 539/* 540 * Thread states. 541 */ 542enum pthread_state { 543 PS_RUNNING, 544 PS_LOCKWAIT, 545 PS_MUTEX_WAIT, 546 PS_COND_WAIT, 547 PS_SLEEP_WAIT, 548 PS_SIGSUSPEND, 549 PS_SIGWAIT, 550 PS_JOIN, 551 PS_SUSPENDED, 552 PS_DEAD, 553 PS_DEADLOCK, 554 PS_STATE_MAX 555}; 556 557struct sigwait_data { 558 sigset_t *waitset; 559 siginfo_t *siginfo; /* used to save siginfo for sigwaitinfo() */ 560}; 561 562union pthread_wait_data { 563 pthread_mutex_t mutex; 564 pthread_cond_t cond; 565 struct lock *lock; 566 struct sigwait_data *sigwait; 567}; 568 569/* 570 * Define a continuation routine that can be used to perform a 571 * transfer of control: 572 */ 573typedef void (*thread_continuation_t) (void *); 574 575/* 576 * This stores a thread's state prior to running a signal handler. 577 * It is used when a signal is delivered to a thread blocked in 578 * userland. If the signal handler returns normally, the thread's 579 * state is restored from here. 
580 */ 581struct pthread_sigframe { 582 int psf_valid; 583 int psf_flags; 584 int psf_cancelflags; 585 int psf_interrupted; 586 int psf_timeout; 587 int psf_signo; 588 enum pthread_state psf_state; 589 union pthread_wait_data psf_wait_data; 590 struct timespec psf_wakeup_time; 591 sigset_t psf_sigset; 592 sigset_t psf_sigmask; 593 int psf_seqno; 594 thread_continuation_t psf_continuation; 595}; 596 597struct join_status { 598 struct pthread *thread; 599 void *ret; 600 int error; 601}; 602 603struct pthread_specific_elem { 604 const void *data; 605 int seqno; 606}; 607 608struct pthread_key { 609 volatile int allocated; 610 volatile int count; 611 int seqno; 612 void (*destructor) (void *); 613}; 614 615#define MAX_THR_LOCKLEVEL 5 616/* 617 * Thread structure. 618 */ 619struct pthread { 620 /* Thread control block */ 621 struct tcb *tcb; 622 623 /* 624 * Magic value to help recognize a valid thread structure 625 * from an invalid one: 626 */ 627#define THR_MAGIC ((u_int32_t) 0xd09ba115) 628 u_int32_t magic; 629 char *name; 630 u_int64_t uniqueid; /* for gdb */ 631 632 /* Queue entry for list of all threads: */ 633 TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ 634 TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */ 635 636 /* Queue entry for GC lists: */ 637 TAILQ_ENTRY(pthread) gcle; 638 639 /* Hash queue entry */ 640 LIST_ENTRY(pthread) hle; 641 642 /* 643 * Lock for accesses to this thread structure. 644 */ 645 struct lock lock; 646 struct lockuser lockusers[MAX_THR_LOCKLEVEL]; 647 int locklevel; 648 kse_critical_t critical[MAX_KSE_LOCKLEVEL]; 649 struct kse *kse; 650 struct kse_group *kseg; 651 652 /* 653 * Thread start routine, argument, stack pointer and thread 654 * attributes. 
655 */ 656 void *(*start_routine)(void *); 657 void *arg; 658 struct pthread_attr attr; 659 660 int active; /* thread running */ 661 int blocked; /* thread blocked in kernel */ 662 int need_switchout; 663 664 /* 665 * Used for tracking delivery of signal handlers. 666 */ 667 siginfo_t *siginfo; 668 thread_continuation_t sigbackout; 669 670 /* 671 * Cancelability flags - the lower 2 bits are used by cancel 672 * definitions in pthread.h 673 */ 674#define THR_AT_CANCEL_POINT 0x0004 675#define THR_CANCELLING 0x0008 676#define THR_CANCEL_NEEDED 0x0010 677 int cancelflags; 678 679 thread_continuation_t continuation; 680 681 /* 682 * The thread's base and pending signal masks. The active 683 * signal mask is stored in the thread's context (in mailbox). 684 */ 685 sigset_t sigmask; 686 sigset_t sigpend; 687 sigset_t *oldsigmask; 688 volatile int check_pending; 689 int refcount; 690 691 /* Thread state: */ 692 enum pthread_state state; 693 volatile int lock_switch; 694 695 /* 696 * Number of microseconds accumulated by this thread when 697 * time slicing is active. 698 */ 699 long slice_usec; 700 701 /* 702 * Time to wake up thread. This is used for sleeping threads and 703 * for any operation which may time out (such as select). 704 */ 705 struct timespec wakeup_time; 706 707 /* TRUE if operation has timed out. */ 708 int timeout; 709 710 /* 711 * Error variable used instead of errno. The function __error() 712 * returns a pointer to this. 713 */ 714 int error; 715 716 /* 717 * The joiner is the thread that is joining to this thread. The 718 * join status keeps track of a join operation to another thread. 719 */ 720 struct pthread *joiner; 721 struct join_status join_status; 722 723 /* 724 * The current thread can belong to only one scheduling queue at 725 * a time (ready or waiting queue). 
It can also belong to: 726 * 727 * o A queue of threads waiting for a mutex 728 * o A queue of threads waiting for a condition variable 729 * 730 * It is possible for a thread to belong to more than one of the 731 * above queues if it is handling a signal. A thread may only 732 * enter a mutex or condition variable queue when it is not 733 * being called from a signal handler. If a thread is a member 734 * of one of these queues when a signal handler is invoked, it 735 * must be removed from the queue before invoking the handler 736 * and then added back to the queue after return from the handler. 737 * 738 * Use pqe for the scheduling queue link (both ready and waiting), 739 * sqe for synchronization (mutex, condition variable, and join) 740 * queue links, and qe for all other links. 741 */ 742 TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */ 743 TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ 744 745 /* Wait data. */ 746 union pthread_wait_data data; 747 748 /* 749 * Set to TRUE if a blocking operation was 750 * interrupted by a signal: 751 */ 752 int interrupted; 753 754 /* 755 * Set to non-zero when this thread has entered a critical 756 * region. We allow for recursive entries into critical regions. 757 */ 758 int critical_count; 759 760 /* 761 * Set to TRUE if this thread should yield after leaving a 762 * critical region to check for signals, messages, etc. 763 */ 764 int critical_yield; 765 766 int sflags; 767#define THR_FLAGS_IN_SYNCQ 0x0001 768 769 /* Miscellaneous flags; only set with scheduling lock held. */ 770 int flags; 771#define THR_FLAGS_PRIVATE 0x0001 772#define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */ 773#define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */ 774#define THR_FLAGS_EXITING 0x0008 /* thread is exiting */ 775#define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */ 776 777 /* Thread list flags; only set with thread list lock held. 
*/ 778#define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */ 779#define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */ 780#define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */ 781 int tlflags; 782 783 /* 784 * Base priority is the user setable and retrievable priority 785 * of the thread. It is only affected by explicit calls to 786 * set thread priority and upon thread creation via a thread 787 * attribute or default priority. 788 */ 789 char base_priority; 790 791 /* 792 * Inherited priority is the priority a thread inherits by 793 * taking a priority inheritence or protection mutex. It 794 * is not affected by base priority changes. Inherited 795 * priority defaults to and remains 0 until a mutex is taken 796 * that is being waited on by any other thread whose priority 797 * is non-zero. 798 */ 799 char inherited_priority; 800 801 /* 802 * Active priority is always the maximum of the threads base 803 * priority and inherited priority. When there is a change 804 * in either the base or inherited priority, the active 805 * priority must be recalculated. 806 */ 807 char active_priority; 808 809 /* Number of priority ceiling or protection mutexes owned. */ 810 int priority_mutex_count; 811 812 /* Number rwlocks rdlocks held. */ 813 int rdlock_count; 814 815 /* 816 * Queue of currently owned mutexes. 817 */ 818 TAILQ_HEAD(, pthread_mutex) mutexq; 819 820 void *ret; 821 struct pthread_specific_elem *specific; 822 int specific_data_count; 823 824 /* Alternative stack for sigaltstack() */ 825 stack_t sigstk; 826 827 /* 828 * Current locks bitmap for rtld. 829 */ 830 int rtld_bits; 831 832 /* Cleanup handlers Link List */ 833 struct pthread_cleanup *cleanup; 834 char *fname; /* Ptr to source file name */ 835 int lineno; /* Source line number. */ 836}; 837 838/* 839 * Critical regions can also be detected by looking at the threads 840 * current lock level. 
Ensure these macros increment and decrement 841 * the lock levels such that locks can not be held with a lock level 842 * of 0. 843 */ 844#define THR_IN_CRITICAL(thrd) \ 845 (((thrd)->locklevel > 0) || \ 846 ((thrd)->critical_count > 0)) 847 848#define THR_YIELD_CHECK(thrd) \ 849do { \ 850 if (!THR_IN_CRITICAL(thrd)) { \ 851 if (__predict_false(_libkse_debug)) \ 852 _thr_debug_check_yield(thrd); \ 853 if ((thrd)->critical_yield != 0) \ 854 _thr_sched_switch(thrd); \ 855 if ((thrd)->check_pending != 0) \ 856 _thr_sig_check_pending(thrd); \ 857 } \ 858} while (0) 859 860#define THR_LOCK_ACQUIRE(thrd, lck) \ 861do { \ 862 if ((thrd)->locklevel < MAX_THR_LOCKLEVEL) { \ 863 THR_DEACTIVATE_LAST_LOCK(thrd); \ 864 (thrd)->locklevel++; \ 865 _lock_acquire((lck), \ 866 &(thrd)->lockusers[(thrd)->locklevel - 1], \ 867 (thrd)->active_priority); \ 868 } else \ 869 PANIC("Exceeded maximum lock level"); \ 870} while (0) 871 872#define THR_LOCK_RELEASE(thrd, lck) \ 873do { \ 874 if ((thrd)->locklevel > 0) { \ 875 _lock_release((lck), \ 876 &(thrd)->lockusers[(thrd)->locklevel - 1]); \ 877 (thrd)->locklevel--; \ 878 THR_ACTIVATE_LAST_LOCK(thrd); \ 879 if ((thrd)->locklevel == 0) \ 880 THR_YIELD_CHECK(thrd); \ 881 } \ 882} while (0) 883 884#define THR_ACTIVATE_LAST_LOCK(thrd) \ 885do { \ 886 if ((thrd)->locklevel > 0) \ 887 _lockuser_setactive( \ 888 &(thrd)->lockusers[(thrd)->locklevel - 1], 1); \ 889} while (0) 890 891#define THR_DEACTIVATE_LAST_LOCK(thrd) \ 892do { \ 893 if ((thrd)->locklevel > 0) \ 894 _lockuser_setactive( \ 895 &(thrd)->lockusers[(thrd)->locklevel - 1], 0); \ 896} while (0) 897 898/* 899 * For now, threads will have their own lock separate from their 900 * KSE scheduling lock. 
901 */ 902#define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock) 903#define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock) 904#define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) 905#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) 906 907/* 908 * Priority queue manipulation macros (using pqe link). We use 909 * the thread's kseg link instead of the kse link because a thread 910 * does not (currently) have a statically assigned kse. 911 */ 912#define THR_RUNQ_INSERT_HEAD(thrd) \ 913 _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd) 914#define THR_RUNQ_INSERT_TAIL(thrd) \ 915 _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd) 916#define THR_RUNQ_REMOVE(thrd) \ 917 _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd) 918 919/* 920 * Macros to insert/remove threads to the all thread list and 921 * the gc list. 922 */ 923#define THR_LIST_ADD(thrd) do { \ 924 if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) { \ 925 TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ 926 _thr_hash_add(thrd); \ 927 (thrd)->tlflags |= TLFLAGS_IN_TDLIST; \ 928 } \ 929} while (0) 930#define THR_LIST_REMOVE(thrd) do { \ 931 if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) { \ 932 TAILQ_REMOVE(&_thread_list, thrd, tle); \ 933 _thr_hash_remove(thrd); \ 934 (thrd)->tlflags &= ~TLFLAGS_IN_TDLIST; \ 935 } \ 936} while (0) 937#define THR_GCLIST_ADD(thrd) do { \ 938 if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \ 939 TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ 940 (thrd)->tlflags |= TLFLAGS_IN_GCLIST; \ 941 _gc_count++; \ 942 } \ 943} while (0) 944#define THR_GCLIST_REMOVE(thrd) do { \ 945 if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \ 946 TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ 947 (thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \ 948 _gc_count--; \ 949 } \ 950} while (0) 951 952#define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5) 953 954/* 955 * Locking the scheduling queue for another thread uses that thread's 956 
* KSEG lock. 957 */ 958#define THR_SCHED_LOCK(curthr, thr) do { \ 959 (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \ 960 (curthr)->locklevel++; \ 961 KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \ 962} while (0) 963 964#define THR_SCHED_UNLOCK(curthr, thr) do { \ 965 KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ 966 (curthr)->locklevel--; \ 967 _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \ 968} while (0) 969 970/* Take the scheduling lock with the intent to call the scheduler. */ 971#define THR_LOCK_SWITCH(curthr) do { \ 972 (void)_kse_critical_enter(); \ 973 KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg); \ 974} while (0) 975#define THR_UNLOCK_SWITCH(curthr) do { \ 976 KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\ 977} while (0) 978 979#define THR_CRITICAL_ENTER(thr) (thr)->critical_count++ 980#define THR_CRITICAL_LEAVE(thr) do { \ 981 (thr)->critical_count--; \ 982 if (((thr)->critical_yield != 0) && \ 983 ((thr)->critical_count == 0)) { \ 984 (thr)->critical_yield = 0; \ 985 _thr_sched_switch(thr); \ 986 } \ 987} while (0) 988 989#define THR_IS_ACTIVE(thrd) \ 990 ((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)) 991 992#define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0) 993 994#define THR_IS_SUSPENDED(thrd) \ 995 (((thrd)->state == PS_SUSPENDED) || \ 996 (((thrd)->flags & THR_FLAGS_SUSPENDED) != 0)) 997#define THR_IS_EXITING(thrd) (((thrd)->flags & THR_FLAGS_EXITING) != 0) 998#define DBG_CAN_RUN(thrd) (((thrd)->tcb->tcb_tmbx.tm_dflags & \ 999 TMDF_SUSPEND) == 0) 1000 1001extern int __isthreaded; 1002 1003static inline int 1004_kse_isthreaded(void) 1005{ 1006 return (__isthreaded != 0); 1007} 1008 1009/* 1010 * Global variables for the pthread kernel. 
1011 */ 1012 1013SCLASS void *_usrstack SCLASS_PRESET(NULL); 1014SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL); 1015SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL); 1016/* For debugger */ 1017SCLASS int _libkse_debug SCLASS_PRESET(0); 1018SCLASS int _thread_activated SCLASS_PRESET(0); 1019SCLASS int _thread_scope_system SCLASS_PRESET(0); 1020 1021/* List of all threads: */ 1022SCLASS TAILQ_HEAD(, pthread) _thread_list 1023 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list)); 1024 1025/* List of threads needing GC: */ 1026SCLASS TAILQ_HEAD(, pthread) _thread_gc_list 1027 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list)); 1028 1029SCLASS int _thread_active_threads SCLASS_PRESET(1); 1030 1031SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list; 1032SCLASS pthread_mutex_t _thr_atfork_mutex; 1033 1034/* Default thread attributes: */ 1035SCLASS struct pthread_attr _pthread_attr_default 1036 SCLASS_PRESET({ 1037 SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY, 1038 THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
|
1037 NULL, NULL, THR_STACK_DEFAULT, /* guardsize */0
|
1039 NULL, NULL, /* stacksize */0, /* guardsize */0 |
1040 }); 1041 1042/* Default mutex attributes: */ 1043SCLASS struct pthread_mutex_attr _pthread_mutexattr_default 1044 SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }); 1045 1046/* Default condition variable attributes: */ 1047SCLASS struct pthread_cond_attr _pthread_condattr_default 1048 SCLASS_PRESET({COND_TYPE_FAST, 0}); 1049 1050/* Clock resolution in usec. */ 1051SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC); 1052 1053/* Array of signal actions for this process: */ 1054SCLASS struct sigaction _thread_sigact[_SIG_MAXSIG]; 1055 1056/* 1057 * Lock for above count of dummy handlers and for the process signal 1058 * mask and pending signal sets. 1059 */ 1060SCLASS struct lock _thread_signal_lock; 1061 1062/* Pending signals and mask for this process: */ 1063SCLASS sigset_t _thr_proc_sigpending; 1064SCLASS siginfo_t _thr_proc_siginfo[_SIG_MAXSIG]; 1065 1066SCLASS pid_t _thr_pid SCLASS_PRESET(0); 1067 1068/* Garbage collector lock. */ 1069SCLASS struct lock _gc_lock; 1070SCLASS int _gc_check SCLASS_PRESET(0); 1071SCLASS int _gc_count SCLASS_PRESET(0); 1072 1073SCLASS struct lock _mutex_static_lock; 1074SCLASS struct lock _rwlock_static_lock; 1075SCLASS struct lock _keytable_lock; 1076SCLASS struct lock _thread_list_lock; 1077SCLASS int _thr_guard_default;
|
1078SCLASS int _thr_stack_default; 1079SCLASS int _thr_stack_initial; |
1080SCLASS int _thr_page_size; 1081SCLASS pthread_t _thr_sig_daemon; 1082SCLASS int _thr_debug_flags SCLASS_PRESET(0); 1083 1084/* Undefine the storage class and preset specifiers: */ 1085#undef SCLASS 1086#undef SCLASS_PRESET 1087 1088 1089/* 1090 * Function prototype definitions. 1091 */ 1092__BEGIN_DECLS 1093int _cond_reinit(pthread_cond_t *); 1094struct kse *_kse_alloc(struct pthread *, int sys_scope); 1095kse_critical_t _kse_critical_enter(void); 1096void _kse_critical_leave(kse_critical_t); 1097int _kse_in_critical(void); 1098void _kse_free(struct pthread *, struct kse *); 1099void _kse_init(); 1100struct kse_group *_kseg_alloc(struct pthread *); 1101void _kse_lock_wait(struct lock *, struct lockuser *lu); 1102void _kse_lock_wakeup(struct lock *, struct lockuser *lu); 1103void _kse_single_thread(struct pthread *); 1104int _kse_setthreaded(int); 1105void _kseg_free(struct kse_group *); 1106int _mutex_cv_lock(pthread_mutex_t *); 1107int _mutex_cv_unlock(pthread_mutex_t *); 1108void _mutex_notify_priochange(struct pthread *, struct pthread *, int); 1109int _mutex_reinit(struct pthread_mutex *); 1110void _mutex_unlock_private(struct pthread *); 1111void _libpthread_init(struct pthread *); 1112int _pq_alloc(struct pq_queue *, int, int); 1113void _pq_free(struct pq_queue *); 1114int _pq_init(struct pq_queue *); 1115void _pq_remove(struct pq_queue *pq, struct pthread *); 1116void _pq_insert_head(struct pq_queue *pq, struct pthread *); 1117void _pq_insert_tail(struct pq_queue *pq, struct pthread *); 1118struct pthread *_pq_first(struct pq_queue *pq); 1119struct pthread *_pq_first_debug(struct pq_queue *pq); 1120void *_pthread_getspecific(pthread_key_t); 1121int _pthread_key_create(pthread_key_t *, void (*) (void *)); 1122int _pthread_key_delete(pthread_key_t); 1123int _pthread_mutex_destroy(pthread_mutex_t *); 1124int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); 1125int _pthread_mutex_lock(pthread_mutex_t *); 1126int 
_pthread_mutex_trylock(pthread_mutex_t *); 1127int _pthread_mutex_unlock(pthread_mutex_t *); 1128int _pthread_mutexattr_init(pthread_mutexattr_t *); 1129int _pthread_mutexattr_destroy(pthread_mutexattr_t *); 1130int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); 1131int _pthread_once(pthread_once_t *, void (*) (void)); 1132int _pthread_rwlock_init(pthread_rwlock_t *, const pthread_rwlockattr_t *); 1133int _pthread_rwlock_destroy (pthread_rwlock_t *); 1134struct pthread *_pthread_self(void); 1135int _pthread_setspecific(pthread_key_t, const void *); 1136void _pthread_yield(void); 1137void _pthread_cleanup_push(void (*routine) (void *), void *routine_arg); 1138void _pthread_cleanup_pop(int execute); 1139struct pthread *_thr_alloc(struct pthread *); 1140void _thr_exit(char *, int, char *); 1141void _thr_exit_cleanup(void); 1142void _thr_lock_wait(struct lock *lock, struct lockuser *lu); 1143void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu); 1144void _thr_mutex_reinit(pthread_mutex_t *); 1145int _thr_ref_add(struct pthread *, struct pthread *, int); 1146void _thr_ref_delete(struct pthread *, struct pthread *); 1147void _thr_rtld_init(void); 1148void _thr_rtld_fini(void); 1149int _thr_schedule_add(struct pthread *, struct pthread *); 1150void _thr_schedule_remove(struct pthread *, struct pthread *); 1151void _thr_setrunnable(struct pthread *curthread, struct pthread *thread); 1152struct kse_mailbox *_thr_setrunnable_unlocked(struct pthread *thread); 1153struct kse_mailbox *_thr_sig_add(struct pthread *, int, siginfo_t *); 1154void _thr_sig_dispatch(struct kse *, int, siginfo_t *); 1155int _thr_stack_alloc(struct pthread_attr *); 1156void _thr_stack_free(struct pthread_attr *); 1157void _thr_exit_cleanup(void); 1158void _thr_free(struct pthread *, struct pthread *); 1159void _thr_gc(struct pthread *); 1160void _thr_panic_exit(char *, int, char *); 1161void _thread_cleanupspecific(void); 1162void _thread_dump_info(void); 1163void _thread_printf(int, 
const char *, ...); 1164void _thr_sched_switch(struct pthread *); 1165void _thr_sched_switch_unlocked(struct pthread *); 1166void _thr_set_timeout(const struct timespec *); 1167void _thr_seterrno(struct pthread *, int); 1168void _thr_sig_handler(int, siginfo_t *, ucontext_t *); 1169void _thr_sig_check_pending(struct pthread *); 1170void _thr_sig_rundown(struct pthread *, ucontext_t *); 1171void _thr_sig_send(struct pthread *pthread, int sig); 1172void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf); 1173void _thr_spinlock_init(void); 1174void _thr_cancel_enter(struct pthread *); 1175void _thr_cancel_leave(struct pthread *, int); 1176int _thr_setconcurrency(int new_level); 1177int _thr_setmaxconcurrency(void); 1178void _thr_critical_enter(struct pthread *); 1179void _thr_critical_leave(struct pthread *); 1180int _thr_start_sig_daemon(void); 1181int _thr_getprocsig(int sig, siginfo_t *siginfo); 1182int _thr_getprocsig_unlocked(int sig, siginfo_t *siginfo); 1183void _thr_signal_init(void); 1184void _thr_signal_deinit(void); 1185void _thr_hash_add(struct pthread *); 1186void _thr_hash_remove(struct pthread *); 1187struct pthread *_thr_hash_find(struct pthread *); 1188void _thr_finish_cancellation(void *arg); 1189int _thr_sigonstack(void *sp); 1190void _thr_debug_check_yield(struct pthread *); 1191 1192/* 1193 * Aliases for _pthread functions. Should be called instead of 1194 * originals if PLT replocation is unwanted at runtme. 
1195 */ 1196int _thr_cond_broadcast(pthread_cond_t *); 1197int _thr_cond_signal(pthread_cond_t *); 1198int _thr_cond_wait(pthread_cond_t *, pthread_mutex_t *); 1199int _thr_mutex_lock(pthread_mutex_t *); 1200int _thr_mutex_unlock(pthread_mutex_t *); 1201int _thr_rwlock_rdlock (pthread_rwlock_t *); 1202int _thr_rwlock_wrlock (pthread_rwlock_t *); 1203int _thr_rwlock_unlock (pthread_rwlock_t *); 1204 1205/* #include <sys/aio.h> */ 1206#ifdef _SYS_AIO_H_ 1207int __sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *); 1208#endif 1209 1210/* #include <fcntl.h> */ 1211#ifdef _SYS_FCNTL_H_ 1212int __sys_fcntl(int, int, ...); 1213int __sys_open(const char *, int, ...); 1214#endif 1215 1216/* #include <sys/ioctl.h> */ 1217#ifdef _SYS_IOCTL_H_ 1218int __sys_ioctl(int, unsigned long, ...); 1219#endif 1220 1221/* #inclde <sched.h> */ 1222#ifdef _SCHED_H_ 1223int __sys_sched_yield(void); 1224#endif 1225 1226/* #include <signal.h> */ 1227#ifdef _SIGNAL_H_ 1228int __sys_kill(pid_t, int); 1229int __sys_sigaction(int, const struct sigaction *, struct sigaction *); 1230int __sys_sigpending(sigset_t *); 1231int __sys_sigprocmask(int, const sigset_t *, sigset_t *); 1232int __sys_sigsuspend(const sigset_t *); 1233int __sys_sigreturn(ucontext_t *); 1234int __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *); 1235#endif 1236 1237/* #include <sys/socket.h> */ 1238#ifdef _SYS_SOCKET_H_ 1239int __sys_accept(int, struct sockaddr *, socklen_t *); 1240int __sys_connect(int, const struct sockaddr *, socklen_t); 1241int __sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *, 1242 off_t *, int); 1243#endif 1244 1245/* #include <sys/uio.h> */ 1246#ifdef _SYS_UIO_H_ 1247ssize_t __sys_readv(int, const struct iovec *, int); 1248ssize_t __sys_writev(int, const struct iovec *, int); 1249#endif 1250 1251/* #include <time.h> */ 1252#ifdef _TIME_H_ 1253int __sys_nanosleep(const struct timespec *, struct timespec *); 1254#endif 1255 1256/* #include <unistd.h> */ 
1257#ifdef _UNISTD_H_ 1258int __sys_close(int); 1259int __sys_execve(const char *, char * const *, char * const *); 1260int __sys_fork(void); 1261int __sys_fsync(int); 1262pid_t __sys_getpid(void); 1263int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); 1264ssize_t __sys_read(int, void *, size_t); 1265ssize_t __sys_write(int, const void *, size_t); 1266void __sys_exit(int); 1267int __sys_sigwait(const sigset_t *, int *); 1268int __sys_sigtimedwait(sigset_t *, siginfo_t *, struct timespec *); 1269#endif 1270 1271/* #include <poll.h> */ 1272#ifdef _SYS_POLL_H_ 1273int __sys_poll(struct pollfd *, unsigned, int); 1274#endif 1275 1276/* #include <sys/mman.h> */ 1277#ifdef _SYS_MMAN_H_ 1278int __sys_msync(void *, size_t, int); 1279#endif 1280 1281#endif /* !_THR_PRIVATE_H */
|