--- thr_kern.c (113658)
+++ thr_kern.c (113661)
/*
 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
 * Copyright (C) 2002 Jonathon Mini <mini@freebsd.org>
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

--- 17 unchanged lines hidden ---

 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
- * $FreeBSD: head/lib/libkse/thread/thr_kern.c 113658 2003-04-18 05:04:16Z deischen $
+ * $FreeBSD: head/lib/libkse/thread/thr_kern.c 113661 2003-04-18 07:09:43Z deischen $
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD");

#include <sys/types.h>
#include <sys/kse.h>
#include <sys/signalvar.h>

--- 36 unchanged lines hidden ---

#define KSE_STACKSIZE		16384

#define KSE_SET_MBOX(kse, thrd) \
	(kse)->k_mbx.km_curthread = &(thrd)->tmbx

#define KSE_SET_EXITED(kse)	(kse)->k_flags |= KF_EXITED

/*
- * Add/remove threads from a KSE's scheduling queue.
- * For now the scheduling queue is hung off the KSEG.
- */
-#define KSEG_THRQ_ADD(kseg, thr) \
-	TAILQ_INSERT_TAIL(&(kseg)->kg_threadq, thr, kle)
-#define KSEG_THRQ_REMOVE(kseg, thr) \
-	TAILQ_REMOVE(&(kseg)->kg_threadq, thr, kle)
-
-
-/*
 * Macros for manipulating the run queues.  The priority queue
 * routines use the thread's pqe link and also handle the setting
 * and clearing of the thread's THR_FLAGS_IN_RUNQ flag.
 */
#define KSE_RUNQ_INSERT_HEAD(kse, thrd) \
	_pq_insert_head(&(kse)->k_schedq->sq_runq, thrd)
#define KSE_RUNQ_INSERT_TAIL(kse, thrd) \
	_pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd)

--- 6 unchanged lines hidden ---

 * We've got to keep track of everything that is allocated, not only
 * to have a speedy free list, but also so they can be deallocated
 * after a fork().
 */
static TAILQ_HEAD(, kse) active_kseq;
static TAILQ_HEAD(, kse) free_kseq;
static TAILQ_HEAD(, kse_group) free_kse_groupq;
static TAILQ_HEAD(, kse_group) active_kse_groupq;
+static TAILQ_HEAD(, kse_group) gc_ksegq;
static struct lock kse_lock;	/* also used for kseg queue */
static int free_kse_count = 0;
static int free_kseg_count = 0;
static TAILQ_HEAD(, pthread) free_threadq;
static struct lock thread_lock;
static int free_thread_count = 0;
static int inited = 0;
static int active_kse_count = 0;
static int active_kseg_count = 0;

static void	kse_check_completed(struct kse *kse);
static void	kse_check_waitq(struct kse *kse);
static void	kse_check_signals(struct kse *kse);
static void	kse_entry(struct kse_mailbox *mbx);
static void	kse_fini(struct kse *curkse);
static void	kse_sched_multi(struct kse *curkse);
static void	kse_sched_single(struct kse *curkse);
static void	kse_switchout_thread(struct kse *kse, struct pthread *thread);
static void	kse_wait(struct kse *kse);
+static void	kse_free_unlocked(struct kse *kse);
static void	kseg_free(struct kse_group *kseg);
static void	kseg_init(struct kse_group *kseg);
static void	kse_waitq_insert(struct pthread *thread);
static void	thr_cleanup(struct kse *kse, struct pthread *curthread);
-static void	thr_gc(struct kse *curkse);
+#ifdef NOT_YET
static void	thr_resume_wrapper(int unused_1, siginfo_t *unused_2,
		    ucontext_t *ucp);
+#endif
static void	thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
		    struct pthread_sigframe *psf);
static int	thr_timedout(struct pthread *thread, struct timespec *curtime);

/*
 * This is called after a fork().
 * No locks need to be taken here since we are guaranteed to be
 * single threaded.

--- 74 unchanged lines hidden ---

		free(kse);
	}
	active_kse_count = 0;

	/* Free the free KSEGs: */
	while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
		TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
		_lock_destroy(&kseg->kg_lock);
+		_pq_free(&kseg->kg_schedq.sq_runq);
		free(kseg);
	}
	free_kseg_count = 0;

	/* Free the active KSEGs: */
	for (kseg = TAILQ_FIRST(&active_kse_groupq);
	    kseg != NULL; kseg = kseg_next) {
		kseg_next = TAILQ_NEXT(kseg, kg_qe);
		TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
		_lock_destroy(&kseg->kg_lock);
+		_pq_free(&kseg->kg_schedq.sq_runq);
		free(kseg);
	}
	active_kseg_count = 0;

	/* Free the free threads. */
	while ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
		TAILQ_REMOVE(&free_threadq, thread, tle);
		if (thread->specific != NULL)
			free(thread->specific);
		for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
			_lockuser_destroy(&thread->lockusers[i]);
		}
		_lock_destroy(&thread->lock);
		free(thread);
	}
	free_thread_count = 0;

	/* Free the to-be-gc'd threads. */
	while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
-		TAILQ_REMOVE(&_thread_gc_list, thread, tle);
+		TAILQ_REMOVE(&_thread_gc_list, thread, gcle);
		free(thread);
	}
+	TAILQ_INIT(&gc_ksegq);
+	_gc_count = 0;

	if (inited != 0) {
		/*
		 * Destroy these locks; they'll be recreated to assure they
		 * are in the unlocked state.
		 */
		_lock_destroy(&kse_lock);
		_lock_destroy(&thread_lock);

--- 29 unchanged lines hidden ---

_kse_init(void)
{
	if (inited == 0) {
		TAILQ_INIT(&active_kseq);
		TAILQ_INIT(&active_kse_groupq);
		TAILQ_INIT(&free_kseq);
		TAILQ_INIT(&free_kse_groupq);
		TAILQ_INIT(&free_threadq);
+		TAILQ_INIT(&gc_ksegq);
		if (_lock_init(&kse_lock, LCK_ADAPTIVE,
		    _kse_lock_wait, _kse_lock_wakeup) != 0)
			PANIC("Unable to initialize free KSE queue lock");
		if (_lock_init(&thread_lock, LCK_ADAPTIVE,
		    _kse_lock_wait, _kse_lock_wakeup) != 0)
			PANIC("Unable to initialize free thread queue lock");
		if (_lock_init(&_thread_list_lock, LCK_ADAPTIVE,
		    _kse_lock_wait, _kse_lock_wakeup) != 0)
			PANIC("Unable to initialize thread list lock");
		active_kse_count = 0;
		active_kseg_count = 0;
+		_gc_count = 0;
		inited = 1;
	}
}
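A note on the `tle' to `gcle' change in _kse_single_thread() above: with the <sys/queue.h> macros, one structure can sit on several lists at once, carrying one TAILQ_ENTRY field per list, and every insert or remove must name the linkage field of the list it touches. A minimal, self-contained sketch of the pattern (struct obj and the list heads are hypothetical, not from this file):

	#include <sys/queue.h>

	struct obj {
		TAILQ_ENTRY(obj) tle;	/* linkage for the "all objects" list */
		TAILQ_ENTRY(obj) gcle;	/* separate linkage for the GC list */
	};

	TAILQ_HEAD(objlist, obj);

	static void
	mark_for_gc(struct objlist *gc, struct obj *o)
	{
		/* The third argument selects which linkage field is used. */
		TAILQ_INSERT_TAIL(gc, o, gcle);
	}

Removing an element through the wrong linkage field (here, `tle' instead of `gcle') silently corrupts both lists, which is what the one-word fix above repairs.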

int
_kse_isthreaded(void)
{
	return (__isthreaded != 0);

--- 430 unchanged lines hidden ---

	}
	else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0)
		kse_switchout_thread(curkse, curthread);
	curkse->k_curthread = NULL;

	/* This has to be done without the scheduling lock held. */
	KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
	kse_check_signals(curkse);
-
-	/* Check for GC: */
-	if (_gc_check != 0)
-		thr_gc(curkse);
	KSE_SCHED_LOCK(curkse, curkse->k_kseg);

	dump_queues(curkse);

	/* Check if there are no threads ready to run: */
	while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) &&
	    (curkse->k_kseg->kg_threadcount != 0)) {
		/*
		 * Wait for a thread to become active or until there are
		 * no more threads.
		 */
		kse_wait(curkse);
		kse_check_waitq(curkse);
		KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
		kse_check_signals(curkse);
-		if (_gc_check != 0)
-			thr_gc(curkse);
		KSE_SCHED_LOCK(curkse, curkse->k_kseg);
	}

	/* Check for no more threads: */
	if (curkse->k_kseg->kg_threadcount == 0) {
		/*
		 * Normally this shouldn't return, but it will if there
		 * are other KSEs running that create new threads that

--- 50 unchanged lines hidden ---

	/*
	 * The thread's current signal frame will only be NULL if it
	 * is being resumed after being blocked in the kernel.  In
	 * this case, and if the thread needs to run down pending
	 * signals or needs a cancellation check, we need to add a
	 * signal frame to the thread's context.
	 */
-#if 0
+#ifdef NOT_YET
	if ((curframe == NULL) && ((curthread->check_pending != 0) ||
	    (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
	    ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0)))) {
		signalcontext(&curthread->tmbx.tm_context, 0,
		    (__sighandler_t *)thr_resume_wrapper);
	}
#endif
	/*

--- 34 unchanged lines hidden ---

			    NULL /* no siginfo */);
			}
		}
		sigemptyset(&sigset);
		__sys_sigprocmask(SIG_SETMASK, &sigset, NULL);
	}
}

+#ifdef NOT_YET
static void
thr_resume_wrapper(int unused_1, siginfo_t *unused_2, ucontext_t *ucp)
{
	struct pthread *curthread = _get_curthread();

	thr_resume_check(curthread, ucp, NULL);
}
+#endif

static void
thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
    struct pthread_sigframe *psf)
{
	/* Check signals before cancellations. */
	while (curthread->check_pending != 0) {
		/* Clear the pending flag. */

--- 17 unchanged lines hidden ---

 * Clean up a thread.  This must be called with the thread's KSE
 * scheduling lock held.  The thread must be a thread from the
 * KSE's group.
 */
static void
thr_cleanup(struct kse *curkse, struct pthread *thread)
{
	struct pthread *joiner;
-	int free_thread = 0;

	if ((joiner = thread->joiner) != NULL) {
		thread->joiner = NULL;
		if ((joiner->state == PS_JOIN) &&
		    (joiner->join_status.thread == thread)) {
			joiner->join_status.thread = NULL;

			/* Set the return status for the joining thread: */

--- 8 unchanged lines hidden ---

				_thr_setrunnable_unlocked(joiner);
				KSE_SCHED_UNLOCK(curkse, joiner->kseg);
				KSE_SCHED_LOCK(curkse, curkse->k_kseg);
			}
		}
		thread->attr.flags |= PTHREAD_DETACHED;
	}

+	if ((thread->attr.flags & PTHREAD_SCOPE_PROCESS) == 0) {
+		/*
+		 * Remove the thread from the KSEG's list of threads.
+		 */
+		KSEG_THRQ_REMOVE(thread->kseg, thread);
+		/*
+		 * Migrate the thread to the main KSE so that this
+		 * KSE and KSEG can be cleaned when their last thread
+		 * exits.
+		 */
+		thread->kseg = _kse_initial->k_kseg;
+		thread->kse = _kse_initial;
+	}
	thread->flags |= THR_FLAGS_GC_SAFE;
-	thread->kseg->kg_threadcount--;
+
+	/*
+	 * We can't hold the thread list lock while holding the
+	 * scheduler lock.
+	 */
+	KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
+	DBG_MSG("Adding thread %p to GC list\n", thread);
	KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
-	_thr_stack_free(&thread->attr);
-	if ((thread->attr.flags & PTHREAD_DETACHED) != 0) {
-		/* Remove this thread from the list of all threads: */
-		THR_LIST_REMOVE(thread);
-		if (thread->refcount == 0) {
-			THR_GCLIST_REMOVE(thread);
-			TAILQ_REMOVE(&thread->kseg->kg_threadq, thread, kle);
-			free_thread = 1;
-		}
-	}
+	THR_GCLIST_ADD(thread);
	KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
-	if (free_thread != 0)
-		_thr_free(curkse, thread);
+	KSE_SCHED_LOCK(curkse, curkse->k_kseg);
}

void
-thr_gc(struct pthread *curthread)
+_thr_gc(struct pthread *curthread)
{
-	struct pthread *td, *joiner;
-	struct kse_group *free_kseg;
+	struct pthread *td, *td_next;
+	kse_critical_t crit;
+	int clean;

-	_gc_check = 0;
-	KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
-	while ((td = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
+	crit = _kse_critical_enter();
+	KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
+
+	/* Check the threads waiting for GC. */
+	for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
+		td_next = TAILQ_NEXT(td, gcle);
+		if ((td->flags & THR_FLAGS_GC_SAFE) == 0)
+			continue;
+#ifdef NOT_YET
+		else if (((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) &&
+		    (td->kse->k_mbx.km_flags == 0)) {
+			/*
+			 * The thread and KSE are operating on the same
+			 * stack.  Wait for the KSE to exit before freeing
+			 * the thread's stack as well as everything else.
+			 */
+			continue;
+		}
+#endif
		THR_GCLIST_REMOVE(td);
-		clean = (td->attr.flags & PTHREAD_DETACHED) != 0;
-		KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+		clean = ((td->attr.flags & PTHREAD_DETACHED) != 0) &&
+		    (td->refcount == 0);
+		_thr_stack_free(&td->attr);
+		KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+		DBG_MSG("Found thread %p in GC list, clean? %d\n", td, clean);

-		KSE_SCHED_LOCK(curkse, td->kseg);
-		TAILQ_REMOVE(&td->kseg->kg_threadq, td, kle);
-		if (TAILQ_EMPTY(&td->kseg->kg_threadq))
-			free_kseg = td->kseg;
-		else
-			free_kseg = NULL;
-		joiner = NULL;
-		if ((td->joiner != NULL) && (td->joiner->state == PS_JOIN) &&
-		    (td->joiner->join_status.thread == td)) {
-			joiner = td->joiner;
-			joiner->join_status.thread = NULL;
-
-			/* Set the return status for the joining thread: */
-			joiner->join_status.ret = td->ret;
-
-			/* Make the thread runnable. */
-			if (td->kseg == joiner->kseg) {
-				_thr_setrunnable_unlocked(joiner);
-				joiner = NULL;
-			}
+		if ((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) {
+			KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
+			kse_free_unlocked(td->kse);
+			kseg_free(td->kseg);
+			KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
		}
-		td->joiner = NULL;
-		KSE_SCHED_UNLOCK(curkse, td->kseg);
-		if (free_kseg != NULL)
-			kseg_free(free_kseg);
-		if (joiner != NULL) {
-			KSE_SCHED_LOCK(curkse, joiner->kseg);
-			_thr_setrunnable_unlocked(joiner);
-			KSE_SCHED_LOCK(curkse, joiner->kseg);
+		if (clean != 0) {
+			_kse_critical_leave(crit);
+			_thr_free(curthread, td);
+			crit = _kse_critical_enter();
		}
-		_thr_free(curkse, td);
-		KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
+		KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
	}
-	KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
+	KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
+	_kse_critical_leave(crit);
}
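The rewritten _thr_gc() shows the locking idiom this revision adopts now that callers are identified by thread rather than by KSE: enter a KSE critical region first, take the low-level lock through curthread->kse, and release in the reverse order. Schematically (the list-walking work is elided; this is a sketch of the idiom, not the full function):

	kse_critical_t crit;

	crit = _kse_critical_enter();		/* defer upcalls first */
	KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
	/* ... examine or modify the shared thread lists ... */
	KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
	_kse_critical_leave(crit);		/* re-enable upcalls last */

Note how the function above briefly leaves the critical region around _thr_free() and re-enters it, which is why crit is reassigned inside the loop.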


/*
 * Only new threads that are running or suspended may be scheduled.
 */
void
_thr_schedule_add(struct pthread *curthread, struct pthread *newthread)

--- 352 unchanged lines hidden ---

/*
 * Avoid calling this kse_exit() so as not to confuse it with the
 * system call of the same name.
 */
static void
kse_fini(struct kse *kse)
{
	struct timespec ts;
+	struct kse_group *free_kseg = NULL;

+	if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0)
+		kse_exit();
	/*
-	 * Check to see if this is the main kse.
+	 * Check to see if this is one of the main kses.
	 */
-	if (kse == _kse_initial) {
+	else if (kse->k_kseg != _kse_initial->k_kseg) {
+		/* Remove this KSE from the KSEG's list of KSEs. */
+		KSE_SCHED_LOCK(kse, kse->k_kseg);
+		TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
+		if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq))
+			free_kseg = kse->k_kseg;
+		KSE_SCHED_UNLOCK(kse, kse->k_kseg);
+
		/*
+		 * Add this KSE to the list of free KSEs along with
+		 * the KSEG if is now orphaned.
+		 */
+		KSE_LOCK_ACQUIRE(kse, &kse_lock);
+		if (free_kseg != NULL)
+			kseg_free(free_kseg);
+		kse_free_unlocked(kse);
+		KSE_LOCK_RELEASE(kse, &kse_lock);
+		kse_exit();
+		/* Never returns. */
+	} else {
+		/*
		 * Wait for the last KSE/thread to exit, or for more
		 * threads to be created (it is possible for additional
		 * scope process threads to be created after the main
		 * thread exits).
		 */
		ts.tv_sec = 120;
		ts.tv_nsec = 0;
		KSE_SET_WAIT(kse);

--- 11 unchanged lines hidden ---

		KSE_SCHED_UNLOCK(kse, kse->k_kseg);

		/* There are no more threads; exit this process: */
		if (kse->k_kseg->kg_threadcount == 0) {
			/* kse_exit(); */
			__isthreaded = 0;
			exit(0);
		}
-	} else {
-		/* Mark this KSE for GC: */
-		KSE_LOCK_ACQUIRE(kse, &_thread_list_lock);
-		TAILQ_INSERT_TAIL(&free_kseq, kse, k_qe);
-		KSE_LOCK_RELEASE(kse, &_thread_list_lock);
-		kse_exit();
-	}
+	}
}

void
_thr_sig_add(struct pthread *thread, int sig, siginfo_t *info, ucontext_t *ucp)
{
	struct kse *curkse;

--- 123 unchanged lines hidden ---

_set_curkse(struct kse *kse)
{
	_ksd_setprivate(&kse->k_ksd);
}

/*
 * Allocate a new KSEG.
 *
- * We allow the current KSE (curkse) to be NULL in the case that this
+ * We allow the current thread to be NULL in the case that this
 * is the first time a KSEG is being created (library initialization).
 * In this case, we don't need to (and can't) take any locks.
 */
struct kse_group *
-_kseg_alloc(struct kse *curkse)
+_kseg_alloc(struct pthread *curthread)
{
	struct kse_group *kseg = NULL;
+	kse_critical_t crit;

-	if ((curkse != NULL) && (free_kseg_count > 0)) {
+	if ((curthread != NULL) && (free_kseg_count > 0)) {
		/* Use the kse lock for the kseg queue. */
-		KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+		crit = _kse_critical_enter();
+		KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
		if ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
			TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
			free_kseg_count--;
			active_kseg_count++;
			TAILQ_INSERT_TAIL(&active_kse_groupq, kseg, kg_qe);
		}
-		KSE_LOCK_RELEASE(curkse, &kse_lock);
+		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
+		_kse_critical_leave(crit);
	}

	/*
	 * If requested, attempt to allocate a new KSE group only if the
	 * KSE allocation was successful and a KSE group wasn't found in
	 * the free list.
	 */
	if ((kseg == NULL) &&
	    ((kseg = (struct kse_group *)malloc(sizeof(*kseg))) != NULL)) {
-		THR_ASSERT(_pq_alloc(&kseg->kg_schedq.sq_runq,
-		    THR_MIN_PRIORITY, THR_LAST_PRIORITY) == 0,
-		    "Unable to allocate priority queue.");
-		kseg_init(kseg);
-		if (curkse != NULL)
-			KSE_LOCK_ACQUIRE(curkse, &kse_lock);
-		kseg_free(kseg);
-		if (curkse != NULL)
-			KSE_LOCK_RELEASE(curkse, &kse_lock);
+		if (_pq_alloc(&kseg->kg_schedq.sq_runq,
+		    THR_MIN_PRIORITY, THR_LAST_PRIORITY) != 0) {
+			free(kseg);
+			kseg = NULL;
+		} else {
+			kseg_init(kseg);
+			/* Add the KSEG to the list of active KSEGs. */
+			if (curthread != NULL) {
+				crit = _kse_critical_enter();
+				KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
+				active_kseg_count++;
+				TAILQ_INSERT_TAIL(&active_kse_groupq,
+				    kseg, kg_qe);
+				KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
+				_kse_critical_leave(crit);
+			} else {
+				active_kseg_count++;
+				TAILQ_INSERT_TAIL(&active_kse_groupq,
+				    kseg, kg_qe);
+			}
+		}
	}
	return (kseg);
}

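Where the old code asserted that _pq_alloc() succeeded, the new _kseg_alloc() frees the partially constructed KSEG and returns NULL, so allocation failure now propagates to the caller. A hypothetical call site (illustrative only, not from this file) would check for it:

	struct kse_group *kseg;

	if ((kseg = _kseg_alloc(curthread)) == NULL)
		return (EAGAIN);	/* out of resources; fail the caller */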
/*
 * This must be called with the kse lock held and when there are
 * no more threads that reference it.
 */
static void
kseg_free(struct kse_group *kseg)
{
+	TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
	TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe);
	kseg_init(kseg);
	free_kseg_count++;
	active_kseg_count--;
}

/*
 * Allocate a new KSE.
 *
- * We allow the current KSE (curkse) to be NULL in the case that this
+ * We allow the current thread to be NULL in the case that this
 * is the first time a KSE is being created (library initialization).
 * In this case, we don't need to (and can't) take any locks.
 */
struct kse *
-_kse_alloc(struct kse *curkse)
+_kse_alloc(struct pthread *curthread)
{
	struct kse *kse = NULL;
+	kse_critical_t crit;
	int need_ksd = 0;
	int i;

-	if ((curkse != NULL) && (free_kse_count > 0)) {
-		KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+	if ((curthread != NULL) && (free_kse_count > 0)) {
+		crit = _kse_critical_enter();
+		KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
		/* Search for a finished KSE. */
		kse = TAILQ_FIRST(&free_kseq);
#define KEMBX_DONE	0x01
		while ((kse != NULL) &&
		    ((kse->k_mbx.km_flags & KEMBX_DONE) == 0)) {
			kse = TAILQ_NEXT(kse, k_qe);
		}
#undef KEMBX_DONE
		if (kse != NULL) {
			TAILQ_REMOVE(&free_kseq, kse, k_qe);
			free_kse_count--;
			active_kse_count++;
			TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
		}
-		KSE_LOCK_RELEASE(curkse, &kse_lock);
+		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
+		_kse_critical_leave(crit);
	}
	if ((kse == NULL) &&
	    ((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
		bzero(kse, sizeof(*kse));

		/* Initialize the lockusers. */
		for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
			_lockuser_init(&kse->k_lockusers[i], (void *)kse);

--- 19 unchanged lines hidden ---

		kse->k_mbx.km_quantum = 20000;
		if (kse->k_mbx.km_stack.ss_size == NULL) {
			free(kse);
			kse = NULL;
		}
	}
	if ((kse != NULL) && (need_ksd != 0)) {
		/* This KSE needs initialization. */
-		if (curkse != NULL)
-			KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+		if (curthread != NULL) {
+			crit = _kse_critical_enter();
+			KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
+		}
		/* Initialize KSD inside of the lock. */
		if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) {
-			if (curkse != NULL)
-				KSE_LOCK_RELEASE(curkse, &kse_lock);
+			if (curthread != NULL) {
+				KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
+				_kse_critical_leave(crit);
+			}
			free(kse->k_mbx.km_stack.ss_sp);
			for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
				_lockuser_destroy(&kse->k_lockusers[i]);
			}
			free(kse);
			return (NULL);
		}
		kse->k_flags = 0;
		active_kse_count++;
		TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
-		if (curkse != NULL)
-			KSE_LOCK_RELEASE(curkse, &kse_lock);
+		if (curthread != NULL) {
+			KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
+			_kse_critical_leave(crit);
+		}
	}
	return (kse);
}

void
-_kse_free(struct kse *curkse, struct kse *kse)
+kse_free_unlocked(struct kse *kse)
{
-	struct kse_group *kseg = NULL;
-
-	if (curkse == kse)
-		PANIC("KSE trying to free itself");
-	KSE_LOCK_ACQUIRE(curkse, &kse_lock);
	active_kse_count--;
-	if ((kseg = kse->k_kseg) != NULL) {
-		TAILQ_REMOVE(&kseg->kg_kseq, kse, k_qe);
-		/*
-		 * Free the KSEG if there are no more threads associated
-		 * with it.
-		 */
-		if (TAILQ_EMPTY(&kseg->kg_threadq))
-			kseg_free(kseg);
-	}
	kse->k_kseg = NULL;
	kse->k_flags &= ~KF_INITIALIZED;
	TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe);
	free_kse_count++;
-	KSE_LOCK_RELEASE(curkse, &kse_lock);
}

+void
+_kse_free(struct pthread *curthread, struct kse *kse)
+{
+	kse_critical_t crit;
+
+	if (curthread == NULL)
+		kse_free_unlocked(kse);
+	else {
+		crit = _kse_critical_enter();
+		KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
+		kse_free_unlocked(kse);
+		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
+		_kse_critical_leave(crit);
+	}
+}
+
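The new kse_free_unlocked()/_kse_free() pair is the usual locked/unlocked split: the _unlocked worker assumes the caller already holds kse_lock (as _thr_gc() and kse_fini() above do), and the wrapper acquires it for callers that do not. The same shape in generic, self-contained form (hypothetical names, with a plain pthread mutex standing in for the library's low-level lock):

	#include <pthread.h>

	struct obj;

	static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

	static void
	obj_free_unlocked(struct obj *o)	/* caller must hold obj_lock */
	{
		/* ... return o to the free list ... */
	}

	void
	obj_free(struct obj *o)
	{
		pthread_mutex_lock(&obj_lock);
		obj_free_unlocked(o);
		pthread_mutex_unlock(&obj_lock);
	}

This avoids recursive locking when the free is performed from a context that already owns the lock.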
static void
kseg_init(struct kse_group *kseg)
{
	TAILQ_INIT(&kseg->kg_kseq);
	TAILQ_INIT(&kseg->kg_threadq);
	TAILQ_INIT(&kseg->kg_schedq.sq_waitq);
-	TAILQ_INIT(&kseg->kg_schedq.sq_blockedq);
	_lock_init(&kseg->kg_lock, LCK_ADAPTIVE, _kse_lock_wait,
	    _kse_lock_wakeup);
	kseg->kg_threadcount = 0;
	kseg->kg_idle_kses = 0;
	kseg->kg_flags = 0;
}

struct pthread *
_thr_alloc(struct pthread *curthread)
{
	kse_critical_t crit;
	struct pthread *thread = NULL;

	if (curthread != NULL) {
-		if (_gc_check != 0)
-			thread_gc(curthread);
+		if (GC_NEEDED())
+			_thr_gc(curthread);
		if (free_thread_count > 0) {
			crit = _kse_critical_enter();
-			KSE_LOCK_ACQUIRE(curkse, &thread_lock);
+			KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
			if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
				TAILQ_REMOVE(&free_threadq, thread, tle);
				free_thread_count--;
			}
-			KSE_LOCK_RELEASE(curkse, &thread_lock);
+			KSE_LOCK_RELEASE(curthread->kse, &thread_lock);
		}
	}
	if (thread == NULL)
		thread = (struct pthread *)malloc(sizeof(struct pthread));
	return (thread);
}

void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
	kse_critical_t crit;

+	DBG_MSG("Freeing thread %p\n", thread);
	if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS))
		free(thread);
	else {
		crit = _kse_critical_enter();
-		KSE_LOCK_ACQUIRE(curkse, &thread_lock);
+		KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
+		THR_LIST_REMOVE(thread);
		TAILQ_INSERT_HEAD(&free_threadq, thread, tle);
		free_thread_count++;
-		KSE_LOCK_RELEASE(curkse, &thread_lock);
+		KSE_LOCK_RELEASE(curthread->kse, &thread_lock);
		_kse_critical_leave(crit);
	}
}
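_thr_alloc() and _thr_free() above implement a bounded free-list cache: freed thread structures are parked for reuse until MAX_CACHED_THREADS of them are cached, after which further frees go back to the allocator. The same pattern in miniature (all names hypothetical; error handling omitted):

	#include <stdlib.h>
	#include <sys/queue.h>

	#define MAX_CACHED	100	/* plays the role of MAX_CACHED_THREADS */

	struct obj {
		TAILQ_ENTRY(obj) qe;
	};

	static TAILQ_HEAD(, obj) freeq = TAILQ_HEAD_INITIALIZER(freeq);
	static int cached = 0;

	struct obj *
	obj_alloc(void)
	{
		struct obj *o;

		if ((o = TAILQ_FIRST(&freeq)) != NULL) {
			TAILQ_REMOVE(&freeq, o, qe);
			cached--;
			return (o);
		}
		return (malloc(sizeof(*o)));
	}

	void
	obj_free(struct obj *o)
	{
		if (cached >= MAX_CACHED)
			free(o);
		else {
			TAILQ_INSERT_HEAD(&freeq, o, qe);
			cached++;
		}
	}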