1/* 2 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by John Birrell. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * Private thread definitions for the uthread kernel. 33 *
 * $FreeBSD: head/lib/libkse/thread/thr_private.h 114187 2003-04-28 23:56:12Z deischen $
35 */ 36 37#ifndef _THR_PRIVATE_H 38#define _THR_PRIVATE_H 39 40/* 41 * Include files. 42 */ 43#include <setjmp.h> 44#include <signal.h> 45#include <stdio.h> 46#include <sys/queue.h> 47#include <sys/types.h> 48#include <sys/time.h> 49#include <sys/cdefs.h> 50#include <sys/kse.h> 51#include <sched.h> 52#include <ucontext.h> 53#include <unistd.h> 54#include <pthread.h> 55#include <pthread_np.h> 56 57#include "ksd.h" 58#include "lock.h" 59#include "pthread_md.h" 60 61/* 62 * Evaluate the storage class specifier. 63 */ 64#ifdef GLOBAL_PTHREAD_PRIVATE 65#define SCLASS 66#define SCLASS_PRESET(x...) = x 67#else 68#define SCLASS extern 69#define SCLASS_PRESET(x...) 70#endif 71 72/* 73 * Kernel fatal error handler macro. 74 */ 75#define PANIC(string) _thr_exit(__FILE__,__LINE__,string) 76 77 78/* Output debug messages like this: */ 79#define stdout_debug(args...) _thread_printf(STDOUT_FILENO, ##args) 80#define stderr_debug(args...) _thread_printf(STDOUT_FILENO, ##args) 81 82#define DBG_MUTEX 0x0001 83#define DBG_SIG 0x0002 84 85 86#define THR_ASSERT(cond, msg) do { \ 87 if (!(cond)) \ 88 PANIC(msg); \ 89} while (0) 90 91 92/* 93 * State change macro without scheduling queue change: 94 */ 95#define THR_SET_STATE(thrd, newstate) do { \ 96 (thrd)->state = newstate; \ 97 (thrd)->fname = __FILE__; \ 98 (thrd)->lineno = __LINE__; \ 99} while (0) 100 101 102/* 103 * Define the signals to be used for scheduling. 
104 */ 105#define _ITIMER_SCHED_TIMER ITIMER_PROF 106#define _SCHED_SIGNAL SIGPROF 107 108#define TIMESPEC_ADD(dst, src, val) \ 109 do { \ 110 (dst)->tv_sec = (src)->tv_sec + (val)->tv_sec; \ 111 (dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \ 112 if ((dst)->tv_nsec > 1000000000) { \ 113 (dst)->tv_sec++; \ 114 (dst)->tv_nsec -= 1000000000; \ 115 } \ 116 } while (0) 117 118#define TIMESPEC_SUB(dst, src, val) \ 119 do { \ 120 (dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \ 121 (dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \ 122 if ((dst)->tv_nsec < 0) { \ 123 (dst)->tv_sec--; \ 124 (dst)->tv_nsec += 1000000000; \ 125 } \ 126 } while (0) 127 128/* 129 * Priority queues. 130 * 131 * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. 132 */ 133typedef struct pq_list { 134 TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ 135 TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ 136 int pl_prio; /* the priority of this list */ 137 int pl_queued; /* is this in the priority queue */ 138} pq_list_t; 139 140typedef struct pq_queue { 141 TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ 142 pq_list_t *pq_lists; /* array of all priority lists */ 143 int pq_size; /* number of priority lists */ 144#define PQF_ACTIVE 0x0001 145 int pq_flags;
	int			pq_threads;
147} pq_queue_t; 148 149/* 150 * Each KSEG has a scheduling queue. For now, threads that exist in their 151 * own KSEG (system scope) will get a full priority queue. In the future 152 * this can be optimized for the single thread per KSEG case. 153 */ 154struct sched_queue { 155 pq_queue_t sq_runq; 156 TAILQ_HEAD(, pthread) sq_waitq; /* waiting in userland */ 157}; 158 159/* Used to maintain pending and active signals: */ 160struct sigstatus { 161 siginfo_t *info; /* arg 2 to signal handler */ 162 int pending; /* Is this a pending signal? */ 163 int blocked; /* 164 * This signal has occured and hasn't 165 * yet been handled; ignore subsequent 166 * signals until the handler is done. 167 */ 168 int signo; 169}; 170 171typedef struct kse_thr_mailbox *kse_critical_t; 172 173struct kse_group; 174 175#define MAX_KSE_LOCKLEVEL 3 176struct kse { 177 struct kse_mailbox k_mbx; /* kernel kse mailbox */ 178 /* -- location and order specific items for gdb -- */ 179 struct pthread *k_curthread; /* current thread */ 180 struct kse_group *k_kseg; /* parent KSEG */ 181 struct sched_queue *k_schedq; /* scheduling queue */ 182 /* -- end of location and order specific items -- */ 183 TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */ 184 TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */ 185 struct ksd k_ksd; /* KSE specific data */ 186 /* 187 * Items that are only modified by the kse, or that otherwise 188 * don't need to be locked when accessed 189 */ 190 struct lock k_lock; 191 struct lockuser k_lockusers[MAX_KSE_LOCKLEVEL]; 192 int k_locklevel; 193 sigset_t k_sigmask; 194 struct sigstatus k_sigq[NSIG]; 195 stack_t k_stack; 196 int k_check_sigq; 197 int k_flags; 198#define KF_STARTED 0x0001 /* kernel kse created */ 199#define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */ 200 int k_waiting;
	int		k_idle;			/* kse is idle */
202 int k_error; /* syscall errno in critical */ 203 int k_cpu; /* CPU ID when bound */ 204 int k_done; /* this KSE is done */ 205}; 206 207/* 208 * Each KSE group contains one or more KSEs in which threads can run. 209 * At least for now, there is one scheduling queue per KSE group; KSEs 210 * within the same KSE group compete for threads from the same scheduling 211 * queue. A scope system thread has one KSE in one KSE group; the group 212 * does not use its scheduling queue. 213 */ 214struct kse_group { 215 TAILQ_HEAD(, kse) kg_kseq; /* list of KSEs in group */ 216 TAILQ_HEAD(, pthread) kg_threadq; /* list of threads in group */ 217 TAILQ_ENTRY(kse_group) kg_qe; /* link entry */ 218 struct sched_queue kg_schedq; /* scheduling queue */ 219 struct lock kg_lock; 220 int kg_threadcount; /* # of assigned threads */ 221 int kg_ksecount; /* # of assigned KSEs */ 222 int kg_idle_kses; 223 int kg_flags; 224#define KGF_SINGLE_THREAD 0x0001 /* scope system kse group */ 225#define KGF_SCHEDQ_INITED 0x0002 /* has an initialized schedq */ 226}; 227 228/* 229 * Add/remove threads from a KSE's scheduling queue. 230 * For now the scheduling queue is hung off the KSEG. 231 */ 232#define KSEG_THRQ_ADD(kseg, thr) \ 233do { \ 234 TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle);\ 235 (kseg)->kg_threadcount++; \ 236} while (0) 237 238#define KSEG_THRQ_REMOVE(kseg, thr) \ 239do { \ 240 TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle); \ 241 (kseg)->kg_threadcount--; \ 242} while (0) 243 244 245/* 246 * Lock acquire and release for KSEs. 
247 */ 248#define KSE_LOCK_ACQUIRE(kse, lck) \ 249do { \ 250 if ((kse)->k_locklevel >= MAX_KSE_LOCKLEVEL) \ 251 PANIC("Exceeded maximum lock level"); \ 252 else { \ 253 (kse)->k_locklevel++; \ 254 _lock_acquire((lck), \ 255 &(kse)->k_lockusers[(kse)->k_locklevel - 1], 0); \ 256 } \ 257} while (0) 258 259#define KSE_LOCK_RELEASE(kse, lck) \ 260do { \ 261 if ((kse)->k_locklevel > 0) { \ 262 _lock_release((lck), \ 263 &(kse)->k_lockusers[(kse)->k_locklevel - 1]); \ 264 (kse)->k_locklevel--; \ 265 } \ 266} while (0) 267 268/* 269 * Lock our own KSEG. 270 */ 271#define KSE_LOCK(curkse) \ 272 KSE_LOCK_ACQUIRE(curkse, &(curkse)->k_kseg->kg_lock) 273#define KSE_UNLOCK(curkse) \ 274 KSE_LOCK_RELEASE(curkse, &(curkse)->k_kseg->kg_lock) 275 276/* 277 * Lock a potentially different KSEG. 278 */ 279#define KSE_SCHED_LOCK(curkse, kseg) \ 280 KSE_LOCK_ACQUIRE(curkse, &(kseg)->kg_lock) 281#define KSE_SCHED_UNLOCK(curkse, kseg) \ 282 KSE_LOCK_RELEASE(curkse, &(kseg)->kg_lock) 283 284/* 285 * Waiting queue manipulation macros (using pqe link): 286 */ 287#define KSE_WAITQ_REMOVE(kse, thrd) \ 288do { \ 289 if (((thrd)->flags & THR_FLAGS_IN_WAITQ) != 0) { \ 290 TAILQ_REMOVE(&(kse)->k_schedq->sq_waitq, thrd, pqe); \ 291 (thrd)->flags &= ~THR_FLAGS_IN_WAITQ; \ 292 } \ 293} while (0) 294#define KSE_WAITQ_INSERT(kse, thrd) kse_waitq_insert(thrd) 295#define KSE_WAITQ_FIRST(kse) TAILQ_FIRST(&(kse)->k_schedq->sq_waitq) 296 297#define KSE_SET_WAIT(kse) atomic_store_rel_int(&(kse)->k_waiting, 1) 298
#define KSE_CLEAR_WAIT(kse)	atomic_store_rel_int(&(kse)->k_waiting, 0)
300 301#define KSE_WAITING(kse) (kse)->k_waiting != 0 302#define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx) 303
#define KSE_SET_IDLE(kse)	((kse)->k_idle = 1)
#define KSE_CLEAR_IDLE(kse)	((kse)->k_idle = 0)
#define KSE_IS_IDLE(kse)	((kse)->k_idle != 0)

308/* 309 * TailQ initialization values. 310 */ 311#define TAILQ_INITIALIZER { NULL, NULL } 312 313/* 314 * lock initialization values. 315 */ 316#define LCK_INITIALIZER { NULL, NULL, LCK_DEFAULT } 317 318struct pthread_mutex { 319 /* 320 * Lock for accesses to this structure. 321 */ 322 struct lock m_lock; 323 enum pthread_mutextype m_type; 324 int m_protocol; 325 TAILQ_HEAD(mutex_head, pthread) m_queue; 326 struct pthread *m_owner; 327 long m_flags; 328 int m_count; 329 int m_refcount; 330 331 /* 332 * Used for priority inheritence and protection. 333 * 334 * m_prio - For priority inheritence, the highest active 335 * priority (threads locking the mutex inherit 336 * this priority). For priority protection, the 337 * ceiling priority of this mutex. 338 * m_saved_prio - mutex owners inherited priority before 339 * taking the mutex, restored when the owner 340 * unlocks the mutex. 341 */ 342 int m_prio; 343 int m_saved_prio; 344 345 /* 346 * Link for list of all mutexes a thread currently owns. 347 */ 348 TAILQ_ENTRY(pthread_mutex) m_qe; 349}; 350 351/* 352 * Flags for mutexes. 353 */ 354#define MUTEX_FLAGS_PRIVATE 0x01 355#define MUTEX_FLAGS_INITED 0x02 356#define MUTEX_FLAGS_BUSY 0x04 357 358/* 359 * Static mutex initialization values. 360 */ 361#define PTHREAD_MUTEX_STATIC_INITIALIZER \ 362 { LCK_INITIALIZER, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, \ 363 TAILQ_INITIALIZER, NULL, MUTEX_FLAGS_PRIVATE, 0, 0, 0, 0, \ 364 TAILQ_INITIALIZER } 365 366struct pthread_mutex_attr { 367 enum pthread_mutextype m_type; 368 int m_protocol; 369 int m_ceiling; 370 long m_flags; 371}; 372 373#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \ 374 { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE } 375 376/* 377 * Condition variable definitions. 378 */ 379enum pthread_cond_type { 380 COND_TYPE_FAST, 381 COND_TYPE_MAX 382}; 383 384struct pthread_cond { 385 /* 386 * Lock for accesses to this structure. 
387 */ 388 struct lock c_lock; 389 enum pthread_cond_type c_type; 390 TAILQ_HEAD(cond_head, pthread) c_queue; 391 struct pthread_mutex *c_mutex; 392 long c_flags; 393 long c_seqno; 394}; 395 396struct pthread_cond_attr { 397 enum pthread_cond_type c_type; 398 long c_flags; 399}; 400 401/* 402 * Flags for condition variables. 403 */ 404#define COND_FLAGS_PRIVATE 0x01 405#define COND_FLAGS_INITED 0x02 406#define COND_FLAGS_BUSY 0x04 407 408/* 409 * Static cond initialization values. 410 */ 411#define PTHREAD_COND_STATIC_INITIALIZER \ 412 { LCK_INITIALIZER, COND_TYPE_FAST, TAILQ_INITIALIZER, \ 413 NULL, NULL, 0, 0 } 414 415/* 416 * Semaphore definitions. 417 */ 418struct sem { 419#define SEM_MAGIC ((u_int32_t) 0x09fa4012) 420 u_int32_t magic; 421 pthread_mutex_t lock; 422 pthread_cond_t gtzero; 423 u_int32_t count; 424 u_int32_t nwaiters; 425}; 426 427/* 428 * Cleanup definitions. 429 */ 430struct pthread_cleanup { 431 struct pthread_cleanup *next; 432 void (*routine) (); 433 void *routine_arg; 434}; 435 436struct pthread_attr { 437 int sched_policy; 438 int sched_inherit; 439 int sched_interval; 440 int prio; 441 int suspend; 442#define THR_STACK_USER 0x100 /* 0xFF reserved for <pthread.h> */ 443 int flags; 444 void *arg_attr; 445 void (*cleanup_attr) (); 446 void *stackaddr_attr; 447 size_t stacksize_attr; 448 size_t guardsize_attr; 449}; 450 451/* 452 * Thread creation state attributes. 453 */ 454#define THR_CREATE_RUNNING 0 455#define THR_CREATE_SUSPENDED 1 456 457/* 458 * Miscellaneous definitions. 459 */ 460#define THR_STACK_DEFAULT 65536 461 462/* 463 * Maximum size of initial thread's stack. This perhaps deserves to be larger 464 * than the stacks of other threads, since many applications are likely to run 465 * almost entirely on this stack. 466 */ 467#define THR_STACK_INITIAL 0x100000 468 469/* 470 * Define the different priority ranges. All applications have thread 471 * priorities constrained within 0-31. 
The threads library raises the 472 * priority when delivering signals in order to ensure that signal 473 * delivery happens (from the POSIX spec) "as soon as possible". 474 * In the future, the threads library will also be able to map specific 475 * threads into real-time (cooperating) processes or kernel threads. 476 * The RT and SIGNAL priorities will be used internally and added to 477 * thread base priorities so that the scheduling queue can handle both 478 * normal and RT priority threads with and without signal handling. 479 * 480 * The approach taken is that, within each class, signal delivery 481 * always has priority over thread execution. 482 */ 483#define THR_DEFAULT_PRIORITY 15 484#define THR_MIN_PRIORITY 0 485#define THR_MAX_PRIORITY 31 /* 0x1F */ 486#define THR_SIGNAL_PRIORITY 32 /* 0x20 */ 487#define THR_RT_PRIORITY 64 /* 0x40 */ 488#define THR_FIRST_PRIORITY THR_MIN_PRIORITY 489#define THR_LAST_PRIORITY \ 490 (THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY) 491#define THR_BASE_PRIORITY(prio) ((prio) & THR_MAX_PRIORITY) 492 493/* 494 * Clock resolution in microseconds. 495 */ 496#define CLOCK_RES_USEC 10000 497 498/* 499 * Time slice period in microseconds. 500 */ 501#define TIMESLICE_USEC 20000 502 503/* 504 * XXX - Define a thread-safe macro to get the current time of day 505 * which is updated at regular intervals by something. 506 * 507 * For now, we just make the system call to get the time. 508 */ 509#define KSE_GET_TOD(curkse, tsp) \ 510do { \ 511 *tsp = (curkse)->k_mbx.km_timeofday; \ 512 if ((tsp)->tv_sec == 0) \ 513 clock_gettime(CLOCK_REALTIME, tsp); \ 514} while (0) 515 516struct pthread_rwlockattr { 517 int pshared; 518}; 519 520struct pthread_rwlock { 521 pthread_mutex_t lock; /* monitor lock */ 522 int state; /* 0 = idle >0 = # of readers -1 = writer */ 523 pthread_cond_t read_signal; 524 pthread_cond_t write_signal; 525 int blocked_writers; 526}; 527 528/* 529 * Thread states. 
530 */ 531enum pthread_state { 532 PS_RUNNING, 533 PS_LOCKWAIT, 534 PS_MUTEX_WAIT, 535 PS_COND_WAIT, 536 PS_SLEEP_WAIT, 537 PS_SIGSUSPEND, 538 PS_SIGWAIT, 539 PS_JOIN, 540 PS_SUSPENDED, 541 PS_DEAD, 542 PS_DEADLOCK, 543 PS_STATE_MAX 544}; 545 546 547union pthread_wait_data { 548 pthread_mutex_t mutex; 549 pthread_cond_t cond; 550 const sigset_t *sigwait; /* Waiting on a signal in sigwait */ 551 struct lock *lock; 552}; 553 554/* 555 * Define a continuation routine that can be used to perform a 556 * transfer of control: 557 */ 558typedef void (*thread_continuation_t) (void *); 559 560/* 561 * This stores a thread's state prior to running a signal handler. 562 * It is used when a signal is delivered to a thread blocked in 563 * userland. If the signal handler returns normally, the thread's 564 * state is restored from here. 565 */ 566struct pthread_sigframe { 567 int psf_flags; 568 int psf_interrupted; 569 int psf_signo; 570 enum pthread_state psf_state; 571 union pthread_wait_data psf_wait_data; 572 struct timespec psf_wakeup_time; 573 sigset_t psf_sigset; 574 sigset_t psf_sigmask; 575 int psf_seqno; 576}; 577 578struct join_status { 579 struct pthread *thread; 580 void *ret; 581 int error; 582}; 583 584struct pthread_specific_elem { 585 const void *data; 586 int seqno; 587}; 588 589 590#define MAX_THR_LOCKLEVEL 3 591/* 592 * Thread structure. 593 */ 594struct pthread { 595 /* 596 * Magic value to help recognize a valid thread structure 597 * from an invalid one: 598 */ 599#define THR_MAGIC ((u_int32_t) 0xd09ba115) 600 u_int32_t magic; 601 char *name; 602 u_int64_t uniqueid; /* for gdb */ 603 604 /* Queue entry for list of all threads: */ 605 TAILQ_ENTRY(pthread) tle; /* link for all threads in process */ 606 TAILQ_ENTRY(pthread) kle; /* link for all threads in KSE/KSEG */ 607 608 /* Queue entry for GC lists: */ 609 TAILQ_ENTRY(pthread) gcle; 610 611 /* 612 * Lock for accesses to this thread structure. 
613 */ 614 struct lock lock; 615 struct lockuser lockusers[MAX_THR_LOCKLEVEL]; 616 int locklevel; 617 kse_critical_t critical[MAX_KSE_LOCKLEVEL]; 618 struct kse *kse; 619 struct kse_group *kseg; 620 621 /* 622 * Thread start routine, argument, stack pointer and thread 623 * attributes. 624 */ 625 void *(*start_routine)(void *); 626 void *arg; 627 struct pthread_attr attr; 628 629 /* 630 * Thread mailbox. 631 */ 632 struct kse_thr_mailbox tmbx; 633 int active; /* thread running */ 634 int blocked; /* thread blocked in kernel */ 635 int need_switchout; 636 int need_wakeup; 637 638 /* 639 * Used for tracking delivery of signal handlers. 640 */ 641 struct pthread_sigframe *curframe; 642 siginfo_t siginfo[NSIG]; 643 644 /* 645 * Cancelability flags - the lower 2 bits are used by cancel 646 * definitions in pthread.h 647 */ 648#define THR_AT_CANCEL_POINT 0x0004 649#define THR_CANCELLING 0x0008 650#define THR_CANCEL_NEEDED 0x0010 651 int cancelflags; 652 653 thread_continuation_t continuation; 654 655 /* 656 * The thread's base and pending signal masks. The active 657 * signal mask is stored in the thread's context (in mailbox). 658 */ 659 sigset_t sigmask; 660 sigset_t sigpend; 661 int sigmask_seqno; 662 int check_pending; 663 int refcount; 664 665 /* Thread state: */ 666 enum pthread_state state;
	int		lock_switch;
668 669 /* 670 * Number of microseconds accumulated by this thread when 671 * time slicing is active. 672 */ 673 long slice_usec; 674 675 /* 676 * Time to wake up thread. This is used for sleeping threads and 677 * for any operation which may time out (such as select). 678 */ 679 struct timespec wakeup_time; 680 681 /* TRUE if operation has timed out. */ 682 int timeout; 683 684 /* 685 * Error variable used instead of errno. The function __error() 686 * returns a pointer to this. 687 */ 688 int error; 689 690 /* 691 * The joiner is the thread that is joining to this thread. The 692 * join status keeps track of a join operation to another thread. 693 */ 694 struct pthread *joiner; 695 struct join_status join_status; 696 697 /* 698 * The current thread can belong to only one scheduling queue at 699 * a time (ready or waiting queue). It can also belong to: 700 * 701 * o A queue of threads waiting for a mutex 702 * o A queue of threads waiting for a condition variable 703 * 704 * It is possible for a thread to belong to more than one of the 705 * above queues if it is handling a signal. A thread may only 706 * enter a mutex or condition variable queue when it is not 707 * being called from a signal handler. If a thread is a member 708 * of one of these queues when a signal handler is invoked, it 709 * must be removed from the queue before invoking the handler 710 * and then added back to the queue after return from the handler. 711 * 712 * Use pqe for the scheduling queue link (both ready and waiting), 713 * sqe for synchronization (mutex, condition variable, and join) 714 * queue links, and qe for all other links. 715 */ 716 TAILQ_ENTRY(pthread) pqe; /* priority, wait queues link */ 717 TAILQ_ENTRY(pthread) sqe; /* synchronization queue link */ 718 719 /* Wait data. 
*/ 720 union pthread_wait_data data; 721 722 /* 723 * Set to TRUE if a blocking operation was 724 * interrupted by a signal: 725 */ 726 int interrupted; 727 728 /* Signal number when in state PS_SIGWAIT: */ 729 int signo; 730 731 /* 732 * Set to non-zero when this thread has entered a critical 733 * region. We allow for recursive entries into critical regions. 734 */ 735 int critical_count; 736 737 /* 738 * Set to TRUE if this thread should yield after leaving a 739 * critical region to check for signals, messages, etc. 740 */ 741 int critical_yield; 742 743 int sflags; 744#define THR_FLAGS_IN_SYNCQ 0x0001 745 746 /* Miscellaneous flags; only set with scheduling lock held. */ 747 int flags; 748#define THR_FLAGS_PRIVATE 0x0001 749#define THR_FLAGS_IN_WAITQ 0x0002 /* in waiting queue using pqe link */ 750#define THR_FLAGS_IN_RUNQ 0x0004 /* in run queue using pqe link */ 751#define THR_FLAGS_EXITING 0x0008 /* thread is exiting */ 752#define THR_FLAGS_SUSPENDED 0x0010 /* thread is suspended */ 753#define THR_FLAGS_GC_SAFE 0x0020 /* thread safe for cleaning */ 754#define THR_FLAGS_IN_TDLIST 0x0040 /* thread in all thread list */ 755#define THR_FLAGS_IN_GCLIST 0x0080 /* thread in gc list */ 756 /* 757 * Base priority is the user setable and retrievable priority 758 * of the thread. It is only affected by explicit calls to 759 * set thread priority and upon thread creation via a thread 760 * attribute or default priority. 761 */ 762 char base_priority; 763 764 /* 765 * Inherited priority is the priority a thread inherits by 766 * taking a priority inheritence or protection mutex. It 767 * is not affected by base priority changes. Inherited 768 * priority defaults to and remains 0 until a mutex is taken 769 * that is being waited on by any other thread whose priority 770 * is non-zero. 771 */ 772 char inherited_priority; 773 774 /* 775 * Active priority is always the maximum of the threads base 776 * priority and inherited priority. 
When there is a change 777 * in either the base or inherited priority, the active 778 * priority must be recalculated. 779 */ 780 char active_priority; 781 782 /* Number of priority ceiling or protection mutexes owned. */ 783 int priority_mutex_count; 784 785 /* 786 * Queue of currently owned mutexes. 787 */ 788 TAILQ_HEAD(, pthread_mutex) mutexq; 789 790 void *ret; 791 struct pthread_specific_elem *specific; 792 int specific_data_count; 793 794 /* Cleanup handlers Link List */ 795 struct pthread_cleanup *cleanup; 796 char *fname; /* Ptr to source file name */ 797 int lineno; /* Source line number. */ 798}; 799 800/* 801 * Critical regions can also be detected by looking at the threads 802 * current lock level. Ensure these macros increment and decrement 803 * the lock levels such that locks can not be held with a lock level 804 * of 0. 805 */ 806#define THR_IN_CRITICAL(thrd) \ 807 (((thrd)->locklevel > 0) || \ 808 ((thrd)->critical_count > 0)) 809 810#define THR_YIELD_CHECK(thrd) \ 811do { \ 812 if (((thrd)->critical_yield != 0) && \
	    !(THR_IN_CRITICAL(thrd))) {				\
		THR_LOCK_SWITCH(thrd);				\
		_thr_sched_switch(thrd);			\
		THR_UNLOCK_SWITCH(thrd);			\
	}							\
818 else if (((thrd)->check_pending != 0) && \ 819 !(THR_IN_CRITICAL(thrd))) \ 820 _thr_sig_check_pending(thrd); \ 821} while (0) 822 823#define THR_LOCK_ACQUIRE(thrd, lck) \ 824do { \ 825 if ((thrd)->locklevel >= MAX_THR_LOCKLEVEL) \ 826 PANIC("Exceeded maximum lock level"); \ 827 else { \ 828 (thrd)->locklevel++; \ 829 _lock_acquire((lck), \ 830 &(thrd)->lockusers[(thrd)->locklevel - 1], \ 831 (thrd)->active_priority); \ 832 } \ 833} while (0) 834 835#define THR_LOCK_RELEASE(thrd, lck) \ 836do { \ 837 if ((thrd)->locklevel > 0) { \ 838 _lock_release((lck), \ 839 &(thrd)->lockusers[(thrd)->locklevel - 1]); \ 840 (thrd)->locklevel--; \
		if ((thrd)->lock_switch)			\
			;					\
		else {						\
			THR_YIELD_CHECK(thrd);			\
		}						\
846 } \ 847} while (0) 848
#define	THR_LOCK_SWITCH(thrd)					\
do {								\
	THR_ASSERT(!(thrd)->lock_switch, "context switch locked"); \
	_kse_critical_enter();					\
	KSE_SCHED_LOCK((thrd)->kse, (thrd)->kseg);		\
	(thrd)->lock_switch = 1;				\
} while (0)

#define	THR_UNLOCK_SWITCH(thrd)					\
do {								\
	THR_ASSERT((thrd)->lock_switch, "context switch not locked"); \
	THR_ASSERT(_kse_in_critical(), "Er,not in critical region"); \
	(thrd)->lock_switch = 0;				\
	KSE_SCHED_UNLOCK((thrd)->kse, (thrd)->kseg);		\
	_kse_critical_leave(&thrd->tmbx);			\
} while (0)

866/* 867 * For now, threads will have their own lock separate from their 868 * KSE scheduling lock. 869 */ 870#define THR_LOCK(thr) THR_LOCK_ACQUIRE(thr, &(thr)->lock) 871#define THR_UNLOCK(thr) THR_LOCK_RELEASE(thr, &(thr)->lock) 872#define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock) 873#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock) 874 875/* 876 * Priority queue manipulation macros (using pqe link). We use 877 * the thread's kseg link instead of the kse link because a thread 878 * does not (currently) have a statically assigned kse. 879 */ 880#define THR_RUNQ_INSERT_HEAD(thrd) \ 881 _pq_insert_head(&(thrd)->kseg->kg_schedq.sq_runq, thrd) 882#define THR_RUNQ_INSERT_TAIL(thrd) \ 883 _pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd) 884#define THR_RUNQ_REMOVE(thrd) \ 885 _pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd) 886#define THR_RUNQ_FIRST() \ 887 _pq_first(&(thrd)->kseg->kg_schedq.sq_runq) 888 889/* 890 * Macros to insert/remove threads to the all thread list and 891 * the gc list. 
892 */ 893#define THR_LIST_ADD(thrd) do { \ 894 if (((thrd)->flags & THR_FLAGS_IN_TDLIST) == 0) { \ 895 TAILQ_INSERT_HEAD(&_thread_list, thrd, tle); \ 896 (thrd)->flags |= THR_FLAGS_IN_TDLIST; \ 897 } \ 898} while (0) 899#define THR_LIST_REMOVE(thrd) do { \ 900 if (((thrd)->flags & THR_FLAGS_IN_TDLIST) != 0) { \ 901 TAILQ_REMOVE(&_thread_list, thrd, tle); \ 902 (thrd)->flags &= ~THR_FLAGS_IN_TDLIST; \ 903 } \ 904} while (0) 905#define THR_GCLIST_ADD(thrd) do { \ 906 if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) { \ 907 TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\ 908 (thrd)->flags |= THR_FLAGS_IN_GCLIST; \ 909 _gc_count++; \ 910 } \ 911} while (0) 912#define THR_GCLIST_REMOVE(thrd) do { \ 913 if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) { \ 914 TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \ 915 (thrd)->flags &= ~THR_FLAGS_IN_GCLIST; \ 916 _gc_count--; \ 917 } \ 918} while (0) 919 920#define GC_NEEDED() (atomic_load_acq_int(&_gc_count) >= 5) 921 922/* 923 * Locking the scheduling queue for another thread uses that thread's 924 * KSEG lock. 925 */ 926#define THR_SCHED_LOCK(curthr, thr) do { \ 927 (curthr)->critical[(curthr)->locklevel] = _kse_critical_enter(); \ 928 (curthr)->locklevel++; \ 929 KSE_SCHED_LOCK((curthr)->kse, (thr)->kseg); \ 930} while (0) 931 932#define THR_SCHED_UNLOCK(curthr, thr) do { \ 933 KSE_SCHED_UNLOCK((curthr)->kse, (thr)->kseg); \ 934 (curthr)->locklevel--; \ 935 _kse_critical_leave((curthr)->critical[(curthr)->locklevel]); \
936} while (0) 937 938#define THR_CRITICAL_ENTER(thr) (thr)->critical_count++ 939#define THR_CRITICAL_LEAVE(thr) do { \ 940 (thr)->critical_count--; \ 941 if (((thr)->critical_yield != 0) && \ 942 ((thr)->critical_count == 0)) { \ 943 (thr)->critical_yield = 0; \
		THR_LOCK_SWITCH(thr);				\
		_thr_sched_switch(thr);				\
		THR_UNLOCK_SWITCH(thr);				\
947 } \ 948} while (0) 949 950#define THR_IS_ACTIVE(thrd) \ 951 ((thrd)->kse != NULL) && ((thrd)->kse->k_curthread == (thrd)) 952 953#define THR_IN_SYNCQ(thrd) (((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0) 954 955/* 956 * Global variables for the pthread kernel. 957 */ 958 959SCLASS void *_usrstack SCLASS_PRESET(NULL); 960SCLASS struct kse *_kse_initial SCLASS_PRESET(NULL); 961SCLASS struct pthread *_thr_initial SCLASS_PRESET(NULL); 962 963/* List of all threads: */ 964SCLASS TAILQ_HEAD(, pthread) _thread_list 965 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_list)); 966 967/* List of threads needing GC: */ 968SCLASS TAILQ_HEAD(, pthread) _thread_gc_list 969 SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list)); 970 971/* Default thread attributes: */ 972SCLASS struct pthread_attr _pthread_attr_default 973 SCLASS_PRESET({ 974 SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY, 975 THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, 976 NULL, NULL, THR_STACK_DEFAULT 977 }); 978 979/* Default mutex attributes: */ 980SCLASS struct pthread_mutex_attr _pthread_mutexattr_default 981 SCLASS_PRESET({PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, 0 }); 982 983/* Default condition variable attributes: */ 984SCLASS struct pthread_cond_attr _pthread_condattr_default 985 SCLASS_PRESET({COND_TYPE_FAST, 0}); 986 987/* Clock resolution in usec. */ 988SCLASS int _clock_res_usec SCLASS_PRESET(CLOCK_RES_USEC); 989 990/* Array of signal actions for this process: */ 991SCLASS struct sigaction _thread_sigact[NSIG]; 992 993/* 994 * Array of counts of dummy handlers for SIG_DFL signals. This is used to 995 * assure that there is always a dummy signal handler installed while there 996 * is a thread sigwait()ing on the corresponding signal. 997 */ 998SCLASS int _thread_dfl_count[NSIG]; 999 1000/* 1001 * Lock for above count of dummy handlers and for the process signal 1002 * mask and pending signal sets. 
1003 */ 1004SCLASS struct lock _thread_signal_lock; 1005 1006/* Pending signals and mask for this process: */ 1007SCLASS sigset_t _thr_proc_sigpending; 1008SCLASS sigset_t _thr_proc_sigmask SCLASS_PRESET({{0, 0, 0, 0}}); 1009SCLASS siginfo_t _thr_proc_siginfo[NSIG]; 1010 1011SCLASS pid_t _thr_pid SCLASS_PRESET(0); 1012 1013/* Garbage collector lock. */ 1014SCLASS struct lock _gc_lock; 1015SCLASS int _gc_check SCLASS_PRESET(0); 1016SCLASS int _gc_count SCLASS_PRESET(0); 1017 1018SCLASS struct lock _mutex_static_lock; 1019SCLASS struct lock _rwlock_static_lock; 1020SCLASS struct lock _keytable_lock; 1021SCLASS struct lock _thread_list_lock; 1022SCLASS int _thr_guard_default; 1023SCLASS int _thr_page_size; 1024 1025SCLASS int _thr_debug_flags SCLASS_PRESET(0); 1026 1027/* Undefine the storage class and preset specifiers: */ 1028#undef SCLASS 1029#undef SCLASS_PRESET 1030 1031 1032/* 1033 * Function prototype definitions. 1034 */ 1035__BEGIN_DECLS 1036int _cond_reinit(pthread_cond_t *); 1037void _cond_wait_backout(struct pthread *); 1038struct pthread *_get_curthread(void); 1039struct kse *_get_curkse(void); 1040void _set_curkse(struct kse *); 1041struct kse *_kse_alloc(struct pthread *); 1042kse_critical_t _kse_critical_enter(void); 1043void _kse_critical_leave(kse_critical_t); 1044int _kse_in_critical(void); 1045void _kse_free(struct pthread *, struct kse *); 1046void _kse_init(); 1047struct kse_group *_kseg_alloc(struct pthread *); 1048void _kse_lock_wait(struct lock *, struct lockuser *lu); 1049void _kse_lock_wakeup(struct lock *, struct lockuser *lu); 1050void _kse_sig_check_pending(struct kse *); 1051void _kse_single_thread(struct pthread *); 1052void _kse_start(struct kse *); 1053int _kse_setthreaded(int); 1054int _kse_isthreaded(void); 1055void _kseg_free(struct kse_group *); 1056int _mutex_cv_lock(pthread_mutex_t *); 1057int _mutex_cv_unlock(pthread_mutex_t *); 1058void _mutex_lock_backout(struct pthread *); 1059void _mutex_notify_priochange(struct pthread *, 
struct pthread *, int); 1060int _mutex_reinit(struct pthread_mutex *); 1061void _mutex_unlock_private(struct pthread *); 1062void _libpthread_init(struct pthread *); 1063int _pq_alloc(struct pq_queue *, int, int); 1064void _pq_free(struct pq_queue *); 1065int _pq_init(struct pq_queue *); 1066void _pq_remove(struct pq_queue *pq, struct pthread *); 1067void _pq_insert_head(struct pq_queue *pq, struct pthread *); 1068void _pq_insert_tail(struct pq_queue *pq, struct pthread *); 1069struct pthread *_pq_first(struct pq_queue *pq); 1070void *_pthread_getspecific(pthread_key_t); 1071int _pthread_key_create(pthread_key_t *, void (*) (void *)); 1072int _pthread_key_delete(pthread_key_t); 1073int _pthread_mutex_destroy(pthread_mutex_t *); 1074int _pthread_mutex_init(pthread_mutex_t *, const pthread_mutexattr_t *); 1075int _pthread_mutex_lock(pthread_mutex_t *); 1076int _pthread_mutex_trylock(pthread_mutex_t *); 1077int _pthread_mutex_unlock(pthread_mutex_t *); 1078int _pthread_mutexattr_init(pthread_mutexattr_t *); 1079int _pthread_mutexattr_destroy(pthread_mutexattr_t *); 1080int _pthread_mutexattr_settype(pthread_mutexattr_t *, int); 1081int _pthread_once(pthread_once_t *, void (*) (void)); 1082struct pthread *_pthread_self(void); 1083int _pthread_setspecific(pthread_key_t, const void *); 1084struct pthread *_thr_alloc(struct pthread *); 1085int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *); 1086int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **); 1087void _thr_exit(char *, int, char *); 1088void _thr_exit_cleanup(void); 1089void _thr_lock_wait(struct lock *lock, struct lockuser *lu); 1090void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu); 1091int _thr_ref_add(struct pthread *, struct pthread *, int); 1092void _thr_ref_delete(struct pthread *, struct pthread *); 1093int _thr_schedule_add(struct pthread *, struct pthread *); 1094void _thr_schedule_remove(struct pthread *, struct pthread *); 1095void _thr_setrunnable(struct 
pthread *curthread, struct pthread *thread); 1096void _thr_setrunnable_unlocked(struct pthread *thread); 1097void _thr_sig_add(struct pthread *, int, siginfo_t *, ucontext_t *); 1098void _thr_sig_dispatch(struct kse *, int, siginfo_t *); 1099int _thr_stack_alloc(struct pthread_attr *); 1100void _thr_stack_free(struct pthread_attr *); 1101void _thr_exit_cleanup(void); 1102void _thr_free(struct pthread *, struct pthread *); 1103void _thr_gc(struct pthread *); 1104void _thr_panic_exit(char *, int, char *); 1105void _thread_cleanupspecific(void); 1106void _thread_dump_info(void); 1107void _thread_printf(int, const char *, ...); 1108void _thr_sched_frame(struct pthread_sigframe *); 1109void _thr_sched_switch(struct pthread *); 1110void _thr_set_timeout(const struct timespec *); 1111void _thr_sig_handler(int, siginfo_t *, ucontext_t *); 1112void _thr_sig_check_pending(struct pthread *); 1113void _thr_sig_rundown(struct pthread *, ucontext_t *, 1114 struct pthread_sigframe *); 1115void _thr_sig_send(struct pthread *pthread, int sig); 1116void _thr_sig_wrapper(void); 1117void _thr_sigframe_restore(struct pthread *thread, struct pthread_sigframe *psf); 1118void _thr_seterrno(struct pthread *, int); 1119void _thr_enter_cancellation_point(struct pthread *); 1120void _thr_leave_cancellation_point(struct pthread *);
|
1121int _thr_setconcurrency(int new_level); 1122int _thr_setmaxconcurrency(void); |

/* XXX - Stuff that goes away when my sources get more up to date. */
/*
 * Prototypes for the raw "__sys_*" syscall stubs the library calls to
 * bypass the pthread wrappers.  Each group is only declared when the
 * corresponding system header has already been included (detected via
 * that header's include guard), because the prototypes reference types
 * those headers define.
 */
/* #include <sys/kse.h> */
/* NOTE(review): guard name is "SYS_KSE_H", unlike the "_..._H_" pattern
 * used below — confirm it matches sys/kse.h's actual include guard. */
#ifdef SYS_KSE_H
int	__sys_kse_create(struct kse_mailbox *, int);
int	__sys_kse_thr_wakeup(struct kse_mailbox *);
int	__sys_kse_exit(struct kse_mailbox *);
int	__sys_kse_release(struct kse_mailbox *);
#endif

/* #include <sys/aio.h> */
#ifdef _SYS_AIO_H_
int	__sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
#endif

/* #include <fcntl.h> */
#ifdef _SYS_FCNTL_H_
int	__sys_fcntl(int, int, ...);
int	__sys_open(const char *, int, ...);
#endif

/* #include <sys/ioctl.h> */
#ifdef _SYS_IOCTL_H_
int	__sys_ioctl(int, unsigned long, ...);
#endif

/* #include <sched.h> */
#ifdef _SCHED_H_
int	__sys_sched_yield(void);
#endif

/* #include <signal.h> */
#ifdef _SIGNAL_H_
int	__sys_kill(pid_t, int);
int	__sys_sigaction(int, const struct sigaction *, struct sigaction *);
int	__sys_sigpending(sigset_t *);
int	__sys_sigprocmask(int, const sigset_t *, sigset_t *);
int	__sys_sigsuspend(const sigset_t *);
int	__sys_sigreturn(ucontext_t *);
int	__sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
#endif

/* #include <sys/socket.h> */
#ifdef _SYS_SOCKET_H_
int	__sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
	    off_t *, int);
#endif

/* #include <sys/uio.h> */
#ifdef _SYS_UIO_H_
ssize_t	__sys_readv(int, const struct iovec *, int);
ssize_t	__sys_writev(int, const struct iovec *, int);
#endif

/* #include <time.h> */
#ifdef _TIME_H_
int	__sys_nanosleep(const struct timespec *, struct timespec *);
#endif

/* #include <unistd.h> */
#ifdef _UNISTD_H_
int	__sys_close(int);
int	__sys_execve(const char *, char * const *, char * const *);
int	__sys_fork(void);
int	__sys_fsync(int);
pid_t	__sys_getpid(void);
int	__sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
ssize_t __sys_read(int, void *, size_t);
ssize_t __sys_write(int, const void *, size_t);
void	__sys_exit(int);
#endif

/* #include <poll.h> */
#ifdef _SYS_POLL_H_
int	__sys_poll(struct pollfd *, unsigned, int);
#endif

/* #include <sys/mman.h> */
#ifdef _SYS_MMAN_H_
int	__sys_msync(void *, size_t, int);
#endif

#endif /* !_THR_PRIVATE_H */