thr_kern.c (117345) → thr_kern.c (117706)
1/*
2 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
3 * Copyright (C) 2002 Jonathon Mini <mini@freebsd.org>
4 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 19 unchanged lines hidden (view full) ---

28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/lib/libkse/thread/thr_kern.c 117345 2003-07-09 01:39:24Z davidxu $");
36__FBSDID("$FreeBSD: head/lib/libkse/thread/thr_kern.c 117706 2003-07-17 23:02:30Z davidxu $");
37
38#include <sys/types.h>
39#include <sys/kse.h>
40#include <sys/signalvar.h>
41#include <sys/queue.h>
42#include <machine/atomic.h>
43#include <machine/sigframe.h>
44

--- 70 unchanged lines hidden (view full) ---

115static struct lock thread_lock;
116static int free_thread_count = 0;
117static int inited = 0;
118static int active_threads = 1;
119static int active_kse_count = 0;
120static int active_kseg_count = 0;
121static u_int64_t next_uniqueid = 1;
122
123LIST_HEAD(thread_hash_head, pthread);
124#define THREAD_HASH_QUEUES 127
125static struct thread_hash_head thr_hashtable[THREAD_HASH_QUEUES];
126#define THREAD_HASH(thrd) ((unsigned long)thrd % THREAD_HASH_QUEUES)
123
124#ifdef DEBUG_THREAD_KERN
125static void dump_queues(struct kse *curkse);
126#endif
127static void kse_check_completed(struct kse *kse);
128static void kse_check_waitq(struct kse *kse);
129static void kse_fini(struct kse *curkse);
130static void kse_reinit(struct kse *kse);
134static void kse_reinit(struct kse *kse, int sys_scope);
131static void kse_sched_multi(struct kse *curkse);
132#ifdef NOT_YET
133static void kse_sched_single(struct kse *curkse);
134#endif
135static void kse_switchout_thread(struct kse *kse, struct pthread *thread);
136static void kse_wait(struct kse *kse, struct pthread *td_wait);
138static void kse_wait(struct kse *kse, struct pthread *td_wait, int sigseq);
137static void kse_free_unlocked(struct kse *kse);
138static void kseg_free_unlocked(struct kse_group *kseg);
139static void kseg_init(struct kse_group *kseg);
140static void kseg_reinit(struct kse_group *kseg);
141static void kse_waitq_insert(struct pthread *thread);
142static void kse_wakeup_multi(struct kse *curkse);
143static void kse_wakeup_one(struct pthread *thread);
144static void thr_cleanup(struct kse *kse, struct pthread *curthread);

--- 235 unchanged lines hidden (view full) ---

380 __isthreaded = 1;
381
382 /*
383 * Tell the kernel to create a KSE for the initial thread
384 * and enable upcalls in it.
385 */
386 _thr_signal_init();
387 _kse_initial->k_flags |= KF_STARTED;
390
391#ifdef SYSTEM_SCOPE_ONLY
392 /*
 393	 * For a bound thread the kernel reads the mailbox pointer only
 394	 * once, so we must set it here, before calling kse_create.
395 */
396 KSE_SET_MBOX(_kse_initial, _thr_initial);
397 _kse_initial->k_mbx.km_flags |= KMF_BOUND;
398#endif
399
388 if (kse_create(&_kse_initial->k_mbx, 0) != 0) {
389 _kse_initial->k_flags &= ~KF_STARTED;
390 __isthreaded = 0;
391 /* may abort() */
392 PANIC("kse_create() failed\n");
393 return (-1);
394 }
406
407#ifndef SYSTEM_SCOPE_ONLY
408 /* Set current thread to initial thread */
395 KSE_SET_MBOX(_kse_initial, _thr_initial);
396 _thr_start_sig_daemon();
397 _thr_setmaxconcurrency();
412#endif
413
398 }
399 return (0);
400}
401
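The ordering here matters: for a bound KSE the kernel latches the mailbox's km_curthread once, at kse_create() time, which is why the SYSTEM_SCOPE_ONLY path sets the mailbox and KMF_BOUND before the call, while the M:N path calls kse_create() first and applies KSE_SET_MBOX() afterwards. A minimal sketch of the bound ordering, assuming the FreeBSD 5.x <sys/kse.h> API (the helper name is hypothetical):

#include <sys/types.h>
#include <sys/kse.h>

static struct kse_mailbox kmbx;     /* stands in for _kse_initial->k_mbx */
static struct kse_thr_mailbox tmbx; /* stands in for _thr_initial's tmbx */

static int
start_bound_upcalls(void)
{
	/*
	 * The kernel reads km_curthread only once for a bound KSE,
	 * so the mailbox pointer and KMF_BOUND must both be in
	 * place before kse_create() is called.
	 */
	kmbx.km_curthread = &tmbx;
	kmbx.km_flags |= KMF_BOUND;
	return (kse_create(&kmbx, 0));
}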
402/*
403 * Lock wait and wakeup handlers for KSE locks. These are only used by
404 * KSEs, and should never be used by threads. KSE locks include the
405 * KSE group lock (used for locking the scheduling queue) and the

--- 181 unchanged lines hidden (view full) ---

587 * o The current thread has signals pending, should
588 * let scheduler install signal trampoline for us.
589 * o There are no runnable threads.
590 * o The next thread to run won't unlock the scheduler
591 * lock. A side note: the current thread may be run
592 * instead of the next thread in the run queue, but
593 * we don't bother checking for that.
594 */
595 if ((curthread->state == PS_DEAD) ||
611 if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
612 kse_sched_single(curkse);
613 else if ((curthread->state == PS_DEAD) ||
596 (((td = KSE_RUNQ_FIRST(curkse)) == NULL) &&
597 (curthread->state != PS_RUNNING)) ||
598 ((td != NULL) && (td->lock_switch == 0))) {
599 curkse->k_switch = 1;
600 _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx);
601 }
602 else {
603 uts_once = 0;

--- 84 unchanged lines hidden (view full) ---

688}
689
690/*
691 * This is the scheduler for a KSE which runs a scope system thread.
692 * The multi-thread KSE scheduler should also work for a single threaded
693 * KSE, but we use a separate scheduler so that it can be fine-tuned
694 * to be more efficient (and perhaps not need a separate stack for
695 * the KSE, allowing it to use the thread's stack).
696 *
697 * XXX - This probably needs some work.
698 */
714 */
699#ifdef NOT_YET
715
700static void
701kse_sched_single(struct kse *curkse)
702{
703 struct pthread *curthread = curkse->k_curthread;
704 struct pthread *td_wait;
705 struct timespec ts;
706 int level;
721 sigset_t sigmask;
722 int i, sigseqno, level, first = 0;
707
723
708 if (curthread->active == 0) {
709 if (curthread->state != PS_RUNNING) {
710 /* Check to see if the thread has timed out. */
711 KSE_GET_TOD(curkse, &ts);
712 if (thr_timedout(curthread, &ts) != 0) {
713 curthread->timeout = 1;
714 curthread->state = PS_RUNNING;
715 }
716 }
717 }
724 if ((curkse->k_flags & KF_INITIALIZED) == 0) {
725 /* Setup this KSEs specific data. */
726 _ksd_setprivate(&curkse->k_ksd);
727 _set_curkse(curkse);
728 curkse->k_flags |= KF_INITIALIZED;
729 first = 1;
730 curthread->active = 1;
731
732 /* Setup kernel signal masks for new thread. */
733 __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
734 /*
 735	 * Enter a critical region. This is meaningless for a bound
 736	 * thread, but it satisfies other code that expects the
 737	 * mailbox to be cleared.
738 */
739 _kse_critical_enter();
740 }
718
741
719 /* This thread no longer needs to yield the CPU: */
720 curthread->critical_yield = 0;
721 curthread->need_switchout = 0;
722
723 /*
724 * Lock the scheduling queue.
725 *
726 * There is no scheduling queue for single threaded KSEs,
727 * but we need a lock for protection regardless.
728 */
729 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
751 if (curthread->lock_switch == 0)
752 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
730
731 /*
732 * This has to do the job of kse_switchout_thread(), only
733 * for a single threaded KSE/KSEG.
734 */
735
736 switch (curthread->state) {
737 case PS_DEAD:
761 curthread->check_pending = 0;
738 /* Unlock the scheduling queue and exit the KSE and thread. */
739 thr_cleaup(curkse, curthread);
763 thr_cleanup(curkse, curthread);
740 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
765 PANIC("bound thread shouldn't get here\n");
741 break;
742
743 case PS_COND_WAIT:
768 case PS_SIGWAIT:
769 PANIC("bound thread does not have SIGWAIT state\n");
770
744 case PS_SLEEP_WAIT:
745 /* Only insert threads that can timeout: */
746 if (curthread->wakeup_time.tv_sec != -1) {
747 /* Insert into the waiting queue: */
748 KSE_WAITQ_INSERT(curkse, curthread);
749 }
772 PANIC("bound thread does not have SLEEP_WAIT state\n");
773
774 case PS_SIGSUSPEND:
775 PANIC("bound thread does not have SIGSUSPEND state\n");
776
777 case PS_COND_WAIT:
750 break;
751
752 case PS_LOCKWAIT:
781 /*
782 * This state doesn't timeout.
783 */
784 curthread->wakeup_time.tv_sec = -1;
785 curthread->wakeup_time.tv_nsec = -1;
753 level = curthread->locklevel - 1;
754 if (!_LCK_GRANTED(&curthread->lockusers[level]))
755 KSE_WAITQ_INSERT(curkse, curthread);
756 else
787 if (_LCK_GRANTED(&curthread->lockusers[level]))
757 THR_SET_STATE(curthread, PS_RUNNING);
758 break;
759
791 case PS_RUNNING:
792 if ((curthread->flags & THR_FLAGS_SUSPENDED) != 0) {
793 THR_SET_STATE(curthread, PS_SUSPENDED);
794 }
795 curthread->wakeup_time.tv_sec = -1;
796 curthread->wakeup_time.tv_nsec = -1;
797 break;
798
760 case PS_JOIN:
761 case PS_MUTEX_WAIT:
762 case PS_RUNNING:
763 case PS_SIGSUSPEND:
764 case PS_SIGWAIT:
765 case PS_SUSPENDED:
766 case PS_DEADLOCK:
767 default:
768 /*
769 * These states don't timeout and don't need
770 * to be in the waiting queue.
771 */
808 curthread->wakeup_time.tv_sec = -1;
809 curthread->wakeup_time.tv_nsec = -1;
772 break;
773 }
812
774 while (curthread->state != PS_RUNNING) {
775 curthread->active = 0;
776 td_wait = KSE_WAITQ_FIRST(curkse);
814 sigseqno = curkse->k_sigseqno;
815 if (curthread->check_pending != 0) {
816 /*
 817	 * Install pending signals into the frame; this may
 818	 * cause a mutex or condvar backout.
819 */
820 curthread->check_pending = 0;
821 SIGFILLSET(sigmask);
777
822
778 kse_wait(curkse, td_wait);
779
780 if (td_wait != NULL) {
781 KSE_GET_TOD(curkse, &ts);
782 if (thr_timedout(curthread, &ts)) {
783 /* Indicate the thread timedout: */
784 td_wait->timeout = 1;
785
786 /* Make the thread runnable. */
787 THR_SET_STATE(td_wait, PS_RUNNING);
788 KSE_WAITQ_REMOVE(curkse, td_wait);
823 /*
824 * Lock out kernel signal code when we are processing
825 * signals, and get a fresh copy of signal mask.
826 */
827 __sys_sigprocmask(SIG_SETMASK, &sigmask,
828 &curthread->sigmask);
829 for (i = 1; i <= _SIG_MAXSIG; i++) {
830 if (SIGISMEMBER(curthread->sigmask, i))
831 continue;
832 if (SIGISMEMBER(curthread->sigpend, i))
833 _thr_sig_add(curthread, i,
834 &curthread->siginfo[i-1]);
789 }
835 }
836 __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask,
837 NULL);
 838	 /* The above code might make the thread runnable. */
839 if (curthread->state == PS_RUNNING)
840 break;
790 }
841 }
842 THR_DEACTIVATE_LAST_LOCK(curthread);
843 kse_wait(curkse, curthread, sigseqno);
844 THR_ACTIVATE_LAST_LOCK(curthread);
845 KSE_GET_TOD(curkse, &ts);
846 if (thr_timedout(curthread, &ts)) {
847 /* Indicate the thread timedout: */
848 curthread->timeout = 1;
849 /* Make the thread runnable. */
850 THR_SET_STATE(curthread, PS_RUNNING);
851 }
791 }
792
793 /* Remove the frame reference. */
794 curthread->curframe = NULL;
795
852 }
853
854 /* Remove the frame reference. */
855 curthread->curframe = NULL;
856
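The pending-signal pass above blocks all signals, takes a fresh copy of the thread's mask, and installs every unmasked pending signal into the frame. A portable sketch of the same scan, using POSIX calls in place of the raw __sys_sigprocmask() syscall and with deliver() as a hypothetical stand-in for _thr_sig_add() (NSIG, nonstandard but ubiquitous, plays the role of _SIG_MAXSIG):

#include <signal.h>

static void
scan_pending(const sigset_t *thrmask, const sigset_t *pend,
    void (*deliver)(int))
{
	sigset_t all, saved;
	int i;

	sigfillset(&all);
	/* Lock out signal delivery while walking the pending set. */
	sigprocmask(SIG_SETMASK, &all, &saved);
	for (i = 1; i < NSIG; i++) {
		if (sigismember(thrmask, i))
			continue;	/* masked by the thread */
		if (sigismember(pend, i))
			deliver(i);	/* install into the frame */
	}
	sigprocmask(SIG_SETMASK, &saved, NULL);
}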
796 /* Unlock the scheduling queue. */
797 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
857 if (curthread->lock_switch == 0) {
858 /* Unlock the scheduling queue. */
859 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
860 }
798
861
799 /*
800 * Continue the thread at its current frame:
801 */
802 DBG_MSG("Continuing bound thread %p\n", curthread);
803 _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread);
804 PANIC("Thread has returned from _thread_switch");
863 if (first) {
864 _kse_critical_leave(&curthread->tmbx);
865 pthread_exit(curthread->start_routine(curthread->arg));
866 }
805}
867}
806#endif
807
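The `first` case is the bound thread's entry path: on its initial upcall the thread leaves the critical region, runs its start routine, and exits through pthread_exit() so the return value reaches any joiner. A portable analogue of that trampoline (struct start_args is illustrative, mirroring the start_routine/arg fields used above):

#include <pthread.h>

struct start_args {
	void	*(*start_routine)(void *);
	void	*arg;
};

static void *
thread_trampoline(void *p)
{
	struct start_args *sa = p;

	/*
	 * Run the user routine, then exit via pthread_exit() so
	 * cleanup handlers run and pthread_join() sees the result.
	 */
	pthread_exit(sa->start_routine(sa->arg));
}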
808#ifdef DEBUG_THREAD_KERN
809static void
810dump_queues(struct kse *curkse)
811{
812 struct pthread *thread;
813
814 DBG_MSG("Threads in waiting queue:\n");

--- 109 unchanged lines hidden (view full) ---

924 /* Check if there are no threads ready to run: */
925 while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) &&
926 (curkse->k_kseg->kg_threadcount != 0)) {
927 /*
928 * Wait for a thread to become active or until there are
929 * no more threads.
930 */
931 td_wait = KSE_WAITQ_FIRST(curkse);
932 kse_wait(curkse, td_wait);
993 kse_wait(curkse, td_wait, 0);
933 kse_check_completed(curkse);
934 kse_check_waitq(curkse);
935 }
936
937 /* Check for no more threads: */
938 if (curkse->k_kseg->kg_threadcount == 0) {
939 /*
940 * Normally this shouldn't return, but it will if there

--- 57 unchanged lines hidden (view full) ---

998#ifdef NOT_YET
999 if ((((curframe == NULL) && (curthread->check_pending != 0)) ||
1000 (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
1001 ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))) &&
1002 !THR_IN_CRITICAL(curthread))
1003 signalcontext(&curthread->tmbx.tm_context, 0,
1004 (__sighandler_t *)thr_resume_wrapper);
1005#else
1006 if ((curframe == NULL) && (curthread->check_pending != 0) &&
1007 !THR_IN_CRITICAL(curthread)) {
1067 if ((curframe == NULL) && (curthread->state == PS_RUNNING) &&
1068 (curthread->check_pending != 0) && !THR_IN_CRITICAL(curthread)) {
1008 curthread->check_pending = 0;
1009 signalcontext(&curthread->tmbx.tm_context, 0,
1010 (__sighandler_t *)thr_resume_wrapper);
1011 }
1012#endif
1013 /*
1014 * Continue the thread at its current frame:
1015 */

--- 108 unchanged lines hidden (view full) ---

1124 * scheduler lock.
1125 */
1126 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
1127 DBG_MSG("Adding thread %p to GC list\n", thread);
1128 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
1129 THR_GCLIST_ADD(thread);
1130 /* Use thread_list_lock */
1131 active_threads--;
1193#ifdef SYSTEM_SCOPE_ONLY
1194 if (active_threads == 0) {
1195#else
1196 if (active_threads == 1) {
1197#endif
1133 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
1134 exit(0);
1135 }
1136 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
1137 if (sys_scope) {
1138 /*
 1139	 * A system scope thread has its own thread group; when
 1140	 * the thread exits, its KSE and KSE group should be
 1141	 * recycled as well.
 1207	 * The KSE upcall stack belongs to the thread; clear it here.
1142 */
1209 curkse->k_stack.ss_sp = 0;
1210 curkse->k_stack.ss_size = 0;
1143 kse_exit();
1144 PANIC("kse_exit() failed for system scope thread");
1145 }
1146 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
1147}
1148
1149void
1150_thr_gc(struct pthread *curthread)

--- 83 unchanged lines hidden (view full) ---

1234 /* Add the new thread. */
1235 thr_link(newthread);
1236
1237 /*
1238 * If this is the first time creating a thread, make sure
1239 * the mailbox is set for the current thread.
1240 */
1241 if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
1242#ifdef NOT_YET
1243 /* We use the thread's stack as the KSE's stack. */
1244 new_thread->kse->k_mbx.km_stack.ss_sp =
1245 new_thread->attr.stackaddr_attr;
1246 new_thread->kse->k_mbx.km_stack.ss_size =
1247 new_thread->attr.stacksize_attr;
1248#endif
1311 newthread->kse->k_mbx.km_stack.ss_sp =
1312 newthread->attr.stackaddr_attr;
1313 newthread->kse->k_mbx.km_stack.ss_size =
1314 newthread->attr.stacksize_attr;
1315
1249 /*
1250 * No need to lock the scheduling queue since the
1251 * KSE/KSEG pair have not yet been started.
1252 */
1253 KSEG_THRQ_ADD(newthread->kseg, newthread);
1254 if (newthread->state == PS_RUNNING)
1255 THR_RUNQ_INSERT_TAIL(newthread);
1256 newthread->kse->k_curthread = NULL;
1257 newthread->kse->k_mbx.km_flags = 0;
1258 newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
 1321	 /* This thread never gives up its KSE. */
1322 newthread->active = 1;
1323 newthread->kse->k_curthread = newthread;
1324 newthread->kse->k_mbx.km_flags = KMF_BOUND;
1325 newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
1259 newthread->kse->k_mbx.km_quantum = 0;
1260
1327 KSE_SET_MBOX(newthread->kse, newthread);
1261 /*
1262 * This thread needs a new KSE and KSEG.
1263 */
1264 newthread->kse->k_flags &= ~KF_INITIALIZED;
1265 newthread->kse->k_flags |= KF_STARTED;
1333 /* Fire up! */
1266 ret = kse_create(&newthread->kse->k_mbx, 1);
1267 if (ret != 0)
1268 ret = errno;
1269 }
1270 else {
1271 /*
1272 * Lock the KSE and add the new thread to its list of
1273 * assigned threads. If the new thread is runnable, also

--- 213 unchanged lines hidden (view full) ---

1487 * see if we need to interrupt it in the kernel.
1488 */
1489 if (thread->check_pending != 0) {
1490 for (i = 1; i <= _SIG_MAXSIG; ++i) {
1491 if (SIGISMEMBER(thread->sigpend, i) &&
1492 !SIGISMEMBER(thread->sigmask, i)) {
1493 restart = _thread_sigact[1 - 1].sa_flags & SA_RESTART;
1494 kse_thr_interrupt(&thread->tmbx,
1495 restart ? -2 : -1);
1563 restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
1496 break;
1497 }
1498 }
1499 }
1500 }
1501 else {
1502 switch (thread->state) {
1503 case PS_DEAD:

--- 108 unchanged lines hidden (view full) ---

1612
1613/*
1614 * This function waits for the smallest timeout value of any waiting
1615 * thread, or until it receives a message from another KSE.
1616 *
1617 * This must be called with the scheduling lock held.
1618 */
1619static void
1620kse_wait(struct kse *kse, struct pthread *td_wait)
1688kse_wait(struct kse *kse, struct pthread *td_wait, int sigseqno)
1621{
1622 struct timespec ts, ts_sleep;
1623 int saved_flags;
1624
1625 KSE_GET_TOD(kse, &ts);
1626
1627 if ((td_wait == NULL) || (td_wait->wakeup_time.tv_sec < 0)) {
1628 /* Limit sleep to no more than 1 minute. */

--- 6 unchanged lines hidden (view full) ---

1635 ts_sleep.tv_nsec = 0;
1636 }
1637 }
1638 /* Don't sleep for negative times. */
1639 if ((ts_sleep.tv_sec >= 0) && (ts_sleep.tv_nsec >= 0)) {
1640 KSE_SET_IDLE(kse);
1641 kse->k_kseg->kg_idle_kses++;
1642 KSE_SCHED_UNLOCK(kse, kse->k_kseg);
1643 saved_flags = kse->k_mbx.km_flags;
1644 kse->k_mbx.km_flags |= KMF_NOUPCALL;
1645 kse_release(&ts_sleep);
1646 kse->k_mbx.km_flags = saved_flags;
1711 if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) &&
1712 (kse->k_sigseqno != sigseqno))
1713 ; /* don't sleep */
1714 else {
1715 saved_flags = kse->k_mbx.km_flags;
1716 kse->k_mbx.km_flags |= KMF_NOUPCALL;
1717 kse_release(&ts_sleep);
1718 kse->k_mbx.km_flags = saved_flags;
1719 }
1647 KSE_SCHED_LOCK(kse, kse->k_kseg);
1648 if (KSE_IS_IDLE(kse)) {
1649 KSE_CLEAR_IDLE(kse);
1650 kse->k_kseg->kg_idle_kses--;
1651 }
1652 }
1653}
1654
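Two details of kse_wait() are worth noting: the sleep is clamped to at most one minute and never a negative interval, and the new sigseqno parameter lets a single-threaded (bound) KSE skip the sleep entirely when k_sigseqno has advanced since the caller sampled it, closing a lost-wakeup window against the kernel signal code. A self-contained sketch of the clamping arithmetic (clamp_sleep() is illustrative; it returns nonzero when there is something to sleep for):

#include <time.h>

static int
clamp_sleep(const struct timespec *now, const struct timespec *wakeup,
    struct timespec *out)
{
	if (wakeup == NULL || wakeup->tv_sec < 0) {
		/* No timeout: limit the sleep to one minute. */
		out->tv_sec = 60;
		out->tv_nsec = 0;
		return (1);
	}
	out->tv_sec = wakeup->tv_sec - now->tv_sec;
	out->tv_nsec = wakeup->tv_nsec - now->tv_nsec;
	if (out->tv_nsec < 0) {		/* borrow from seconds */
		out->tv_sec--;
		out->tv_nsec += 1000000000;
	}
	if (out->tv_sec > 60) {		/* cap long sleeps */
		out->tv_sec = 60;
		out->tv_nsec = 0;
	}
	/* Don't sleep for negative times. */
	return (out->tv_sec >= 0 && out->tv_nsec >= 0);
}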

--- 305 unchanged lines hidden (view full) ---

1960/*
1961 * Allocate a new KSE.
1962 *
1963 * We allow the current thread to be NULL in the case that this
1964 * is the first time a KSE is being created (library initialization).
1965 * In this case, we don't need to (and can't) take any locks.
1966 */
1967struct kse *
1968_kse_alloc(struct pthread *curthread)
2041_kse_alloc(struct pthread *curthread, int sys_scope)
1969{
1970 struct kse *kse = NULL;
1971 kse_critical_t crit;
1972 int need_ksd = 0;
1973 int i;
1974
1975 if ((curthread != NULL) && (free_kse_count > 0)) {
1976 crit = _kse_critical_enter();

--- 9 unchanged lines hidden (view full) ---

1986 TAILQ_REMOVE(&free_kseq, kse, k_qe);
1987 free_kse_count--;
1988 TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
1989 active_kse_count++;
1990 }
1991 KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
1992 _kse_critical_leave(crit);
1993 if (kse != NULL)
1994 kse_reinit(kse);
2067 kse_reinit(kse, sys_scope);
1995 }
1996 if ((kse == NULL) &&
1997 ((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
1998 bzero(kse, sizeof(*kse));
1999
2000 /* Initialize the lockusers. */
2001 for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
2002 _lockuser_init(&kse->k_lockusers[i], (void *)kse);
2003 _LCK_SET_PRIVATE2(&kse->k_lockusers[i], NULL);
2004 }
2005 /* _lock_init(kse->k_lock, ...) */
2006
2007 /* We had to malloc a kse; mark it as needing a new ID.*/
2008 need_ksd = 1;
2009
2010 /*
2011 * Create the KSE context.
2012 *
2013 * XXX - For now this is done here in the allocation.
2014 * In the future, we may want to have it done
2015 * outside the allocation so that scope system
2016 * threads (one thread per KSE) are not required
2017 * to have a stack for an unneeded kse upcall.
2085 * Scope system threads (one thread per KSE) are not required
2086 * to have a stack for an unneeded kse upcall.
2087 */
2019 kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
2020 kse->k_mbx.km_stack.ss_sp = (char *)malloc(KSE_STACKSIZE);
2021 kse->k_mbx.km_stack.ss_size = KSE_STACKSIZE;
2088 if (!sys_scope) {
2089 kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
2090 kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE);
2091 kse->k_stack.ss_size = KSE_STACKSIZE;
2092 } else {
2093 kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
2094 }
2022 kse->k_mbx.km_udata = (void *)kse;
2023 kse->k_mbx.km_quantum = 20000;
2024 /*
2025 * We need to keep a copy of the stack in case it
2026 * doesn't get used; a KSE running a scope system
2027 * thread will use that thread's stack.
2028 */
2029 kse->k_stack.ss_sp = kse->k_mbx.km_stack.ss_sp;
2030 kse->k_stack.ss_size = kse->k_mbx.km_stack.ss_size;
2031 if (kse->k_mbx.km_stack.ss_sp == NULL) {
2102 kse->k_mbx.km_stack = kse->k_stack;
2103 if (!sys_scope && kse->k_stack.ss_sp == NULL) {
2032 for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
2033 _lockuser_destroy(&kse->k_lockusers[i]);
2034 }
2035 /* _lock_destroy(&kse->k_lock); */
2036 free(kse);
2037 kse = NULL;
2038 }
2039 }

--- 4 unchanged lines hidden (view full) ---

2044 KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
2045 }
2046 /* Initialize KSD inside of the lock. */
2047 if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) {
2048 if (curthread != NULL) {
2049 KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
2050 _kse_critical_leave(crit);
2051 }
2052 free(kse->k_mbx.km_stack.ss_sp);
2124 if (kse->k_stack.ss_sp)
2125 free(kse->k_stack.ss_sp);
2053 for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
2054 _lockuser_destroy(&kse->k_lockusers[i]);
2055 }
2056 free(kse);
2057 return (NULL);
2058 }
2059 kse->k_flags = 0;
2060 TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
2061 active_kse_count++;
2062 if (curthread != NULL) {
2063 KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
2064 _kse_critical_leave(crit);
2065 }
2066 }
2067 return (kse);
2068}
2069
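_kse_alloc() is an allocate-or-recycle routine: it first tries to pop a previously freed KSE from free_kseq and reinitialize it, and only when the free list is empty does it malloc a fresh structure and pay the full setup cost (lockusers, KSD, upcall stack). A stripped-down sketch of the pattern using <sys/queue.h>, with struct obj standing in for struct kse:

#include <sys/queue.h>
#include <stdlib.h>

struct obj {
	TAILQ_ENTRY(obj) link;
};

static TAILQ_HEAD(, obj) freeq = TAILQ_HEAD_INITIALIZER(freeq);
static int free_count;

static struct obj *
obj_alloc(void)
{
	struct obj *o;

	if ((o = TAILQ_FIRST(&freeq)) != NULL) {
		/* Recycle; the caller reinitializes (cf. kse_reinit()). */
		TAILQ_REMOVE(&freeq, o, link);
		free_count--;
	} else
		o = calloc(1, sizeof(*o));	/* first use: fresh object */
	return (o);
}

static void
obj_free(struct obj *o)
{
	TAILQ_INSERT_HEAD(&freeq, o, link);
	free_count++;
}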
2070static void
2071kse_reinit(struct kse *kse)
2144kse_reinit(struct kse *kse, int sys_scope)
2072{
2073 /*
2074 * XXX - For now every kse has its stack.
2075 * In the future, we may want to have it done
2076 * outside the allocation so that scope system
2077 * threads (one thread per KSE) are not required
2078 * to have a stack for an unneeded kse upcall.
2079 */
2146 if (!sys_scope) {
2147 kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
2148 if (kse->k_stack.ss_sp == NULL) {
2149 /* XXX check allocation failure */
2150 kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE);
2151 kse->k_stack.ss_size = KSE_STACKSIZE;
2152 }
2153 kse->k_mbx.km_quantum = 20000;
2154 } else {
2155 kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
2156 if (kse->k_stack.ss_sp)
2157 free(kse->k_stack.ss_sp);
2158 kse->k_stack.ss_sp = NULL;
2159 kse->k_stack.ss_size = 0;
2160 kse->k_mbx.km_quantum = 0;
2161 }
2162 kse->k_mbx.km_stack = kse->k_stack;
2163 kse->k_mbx.km_udata = (void *)kse;
2164 kse->k_mbx.km_curthread = NULL;
2080 kse->k_mbx.km_flags = 0;
2081 kse->k_curthread = 0;
2082 kse->k_kseg = 0;
2083 kse->k_schedq = 0;
2084 kse->k_locklevel = 0;
2085 SIGEMPTYSET(kse->k_sigmask);
2086 bzero(&kse->k_sigq, sizeof(kse->k_sigq));
2087 kse->k_check_sigq = 0;
2088 kse->k_flags = 0;
2089 kse->k_waiting = 0;
2090 kse->k_idle = 0;
2091 kse->k_error = 0;
2092 kse->k_cpu = 0;
2093 kse->k_done = 0;
2094 kse->k_switch = 0;
2180 kse->k_sigseqno = 0;
2095}
2096
2097void
2098kse_free_unlocked(struct kse *kse)
2099{
2100 TAILQ_REMOVE(&active_kseq, kse, k_qe);
2101 active_kse_count--;
2102 kse->k_kseg = NULL;

--- 118 unchanged lines hidden (view full) ---

2221 * Initialize the unique id (which GDB uses to track
2222 * threads), add the thread to the list of all threads,
2223 * and
2224 */
2225 thread->uniqueid = next_uniqueid++;
2226 THR_LIST_ADD(thread);
2227 active_threads++;
2228 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
2229
2230 _kse_critical_leave(crit);
2231}
2232
2233/*
2234 * Remove an active thread.
2235 */
2236static void
2237thr_unlink(struct pthread *thread)
2238{
2239 kse_critical_t crit;
2240 struct kse *curkse;
2241
2242 crit = _kse_critical_enter();
2243 curkse = _get_curkse();
2244
2245 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
2246 THR_LIST_REMOVE(thread);
2247 active_threads--;
2248 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
2249
2250 _kse_critical_leave(crit);
2251}
2335
2336void
2337_thr_hash_add(struct pthread *thread)
2338{
2339 struct thread_hash_head *head;
2340
2341 head = &thr_hashtable[THREAD_HASH(thread)];
2342 LIST_INSERT_HEAD(head, thread, hle);
2343}
2344
2345void
2346_thr_hash_remove(struct pthread *thread)
2347{
2348 LIST_REMOVE(thread, hle);
2349}
2350
2351struct pthread *
2352_thr_hash_find(struct pthread *thread)
2353{
2354 struct pthread *td;
2355 struct thread_hash_head *head;
2356
2357 head = &thr_hashtable[THREAD_HASH(thread)];
2358 LIST_FOREACH(td, head, hle) {
2359 if (td == thread)
2360 return (thread);
2361 }
2362 return (NULL);
2363}
2364
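The hash functions above map a pthread pointer to one of 127 LIST buckets; a lookup walks the bucket and so doubles as a validity check for a possibly stale thread handle. A self-contained demo of the same scheme, with hypothetical names:

#include <sys/queue.h>
#include <stdio.h>

struct node {
	LIST_ENTRY(node) hle;
};
LIST_HEAD(bucket, node);

#define NBUCKETS	127
#define HASH(p)		((unsigned long)(p) % NBUCKETS)

static struct bucket table[NBUCKETS];	/* zero-initialized: empty lists */

int
main(void)
{
	struct node n, *it;
	int found = 0;

	LIST_INSERT_HEAD(&table[HASH(&n)], &n, hle);
	LIST_FOREACH(it, &table[HASH(&n)], hle) {
		if (it == &n)
			found = 1;
	}
	LIST_REMOVE(&n, hle);
	printf("found=%d\n", found);
	return (0);
}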