thr_kern.c: FreeBSD revision 115173 vs. revision 115278
Deleted lines carry the old revision's line numbers; added lines carry the new revision's; unchanged context is shown once.
1/*
2 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
3 * Copyright (C) 2002 Jonathon Mini <mini@freebsd.org>
4 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 19 unchanged lines hidden ---

28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/lib/libkse/thread/thr_kern.c 115173 2003-05-19 23:04:50Z deischen $");
36__FBSDID("$FreeBSD: head/lib/libkse/thread/thr_kern.c 115278 2003-05-24 02:29:25Z deischen $");
37
38#include <sys/types.h>
39#include <sys/kse.h>
40#include <sys/signalvar.h>
41#include <sys/queue.h>
42#include <machine/atomic.h>
43
44#include <assert.h>

--- 47 unchanged lines hidden ---

92#define KSE_RUNQ_INSERT_TAIL(kse, thrd) \
93 _pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd)
94#define KSE_RUNQ_REMOVE(kse, thrd) \
95 _pq_remove(&(kse)->k_schedq->sq_runq, thrd)
96#define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq)
97
98#define KSE_RUNQ_THREADS(kse) ((kse)->k_schedq->sq_runq.pq_threads)
99
100#ifndef KMF_DONE
101#define KMF_DONE 0x04
102#endif
103
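
Note: the #ifndef/#define/#endif block added above is a build shim; it lets
the library compile against kernel headers that predate the KMF_DONE
mailbox flag, which the kernel sets in km_flags once it is finished with a
KSE.  A minimal sketch of how such a guarded flag is consumed; the helper
name is illustrative, not from the library:

	#ifndef KMF_DONE
	#define KMF_DONE 0x04	/* mirrors the fallback above */
	#endif

	static int
	kse_is_reclaimable(struct kse *kse)
	{
		/* Free a KSE's stack only after the kernel is done with it. */
		return ((kse->k_mbx.km_flags & KMF_DONE) != 0);
	}

_thr_gc() below performs exactly this test before reclaiming a system
scope thread's KSE.
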
100/*
101 * We've got to keep track of everything that is allocated, not only
102 * to have a speedy free list, but also so they can be deallocated
103 * after a fork().
104 */
105static TAILQ_HEAD(, kse) active_kseq;
106static TAILQ_HEAD(, kse) free_kseq;
107static TAILQ_HEAD(, kse_group) free_kse_groupq;
108static TAILQ_HEAD(, kse_group) active_kse_groupq;
109static TAILQ_HEAD(, kse_group) gc_ksegq;
110static struct lock kse_lock; /* also used for kseg queue */
111static int free_kse_count = 0;
112static int free_kseg_count = 0;
113static TAILQ_HEAD(, pthread) free_threadq;
114static struct lock thread_lock;
115static int free_thread_count = 0;
116static int inited = 0;
121static int active_threads = 1;
117static int active_kse_count = 0;
118static int active_kseg_count = 0;
124static u_int64_t next_uniqueid = 1;
119
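
Note: these statics implement the free-list pattern described in the
comment above: every KSE, KSE group, and thread sits on either an active
list or a free list, so the whole population can be walked and released
after a fork().  A self-contained sketch of the same <sys/queue.h> idiom,
with illustrative names that are not from the library:

	#include <sys/queue.h>
	#include <stdlib.h>

	struct obj {
		TAILQ_ENTRY(obj) qe;	/* list linkage */
	};

	static TAILQ_HEAD(, obj) active_q = TAILQ_HEAD_INITIALIZER(active_q);
	static TAILQ_HEAD(, obj) free_q = TAILQ_HEAD_INITIALIZER(free_q);

	static struct obj *
	obj_alloc(void)
	{
		struct obj *p;

		/* Prefer a cached object; fall back to malloc(). */
		if ((p = TAILQ_FIRST(&free_q)) != NULL)
			TAILQ_REMOVE(&free_q, p, qe);
		else
			p = malloc(sizeof(*p));
		if (p != NULL)
			TAILQ_INSERT_TAIL(&active_q, p, qe);
		return (p);
	}

	static void
	obj_free(struct obj *p)
	{
		/* Cache instead of freeing; both lists stay walkable. */
		TAILQ_REMOVE(&active_q, p, qe);
		TAILQ_INSERT_HEAD(&free_q, p, qe);
	}

The new active_threads and next_uniqueid statics extend the same
bookkeeping; thr_link() and thr_unlink() at the end of this diff maintain
them under _thread_list_lock.
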
120#ifdef DEBUG_THREAD_KERN
121static void dump_queues(struct kse *curkse);
122#endif
123static void kse_check_completed(struct kse *kse);
124static void kse_check_waitq(struct kse *kse);
125static void kse_check_signals(struct kse *kse);
126static void kse_fini(struct kse *curkse);
127static void kse_reinit(struct kse *kse);

--- 6 unchanged lines hidden ---

134static void kse_free_unlocked(struct kse *kse);
135static void kseg_free_unlocked(struct kse_group *kseg);
136static void kseg_init(struct kse_group *kseg);
137static void kseg_reinit(struct kse_group *kseg);
138static void kse_waitq_insert(struct pthread *thread);
139static void kse_wakeup_multi(struct kse *curkse);
140static void kse_wakeup_one(struct pthread *thread);
141static void thr_cleanup(struct kse *kse, struct pthread *curthread);
149static void thr_link(struct pthread *thread);
142static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2,
143 ucontext_t *ucp);
144static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
145 struct pthread_sigframe *psf);
146static int thr_timedout(struct pthread *thread, struct timespec *curtime);
155static void thr_unlink(struct pthread *thread);
147
148/*
149 * This is called after a fork().
150 * No locks need to be taken here since we are guaranteed to be
151 * single threaded.
152 */
153void
154_kse_single_thread(struct pthread *curthread)
155{
156 struct kse *kse, *kse_next;
157 struct kse_group *kseg, *kseg_next;
158 struct pthread *thread, *thread_next;
165 struct kse *kse;
166 struct kse_group *kseg;
167 struct pthread *thread;
159 kse_critical_t crit;
160 int i;
161
162 /*
163 * Disable upcalls and clear the threaded flag.
164 * XXX - I don't think we need to disable upcalls after a fork(),
165 * but it doesn't hurt.
166 */
167 crit = _kse_critical_enter();
168 __isthreaded = 0;
178 active_threads = 1;
169
170 /*
171 * Enter a loop to remove and free all threads other than
172 * the running thread from the active thread list:
173 */
174 for (thread = TAILQ_FIRST(&_thread_list); thread != NULL;
175 thread = thread_next) {
184 while ((thread = TAILQ_FIRST(&_thread_list)) != NULL) {
185 THR_GCLIST_REMOVE(thread);
176 /*
177 * Advance to the next thread before destroying
178 * the current thread.
179 */
180 thread_next = TAILQ_NEXT(thread, tle);
181
182 /*
183 * Remove this thread from the list (the current
184 * thread will be removed but re-added by libpthread
185 * initialization).
186 */
187 TAILQ_REMOVE(&_thread_list, thread, tle);
188 /* Make sure this isn't the running thread: */
189 if (thread != curthread) {
190 _thr_stack_free(&thread->attr);

--- 4 unchanged lines hidden ---

195 }
196 _lock_destroy(&thread->lock);
197 free(thread);
198 }
199 }
200
201 TAILQ_INIT(&curthread->mutexq); /* initialize mutex queue */
202 curthread->joiner = NULL; /* no joining threads yet */
207 curthread->refcount = 0;
203 sigemptyset(&curthread->sigpend); /* clear pending signals */
204 if (curthread->specific != NULL) {
205 free(curthread->specific);
206 curthread->specific = NULL;
207 curthread->specific_data_count = 0;
208 }
209
210 /* Free the free KSEs: */
211 while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) {
212 TAILQ_REMOVE(&free_kseq, kse, k_qe);
218 for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
219 _lockuser_destroy(&kse->k_lockusers[i]);
220 }
221 _lock_destroy(&kse->k_lock);
213 _ksd_destroy(&kse->k_ksd);
214 if (kse->k_stack.ss_sp != NULL)
215 free(kse->k_stack.ss_sp);
216 free(kse);
217 }
218 free_kse_count = 0;
219
220 /* Free the active KSEs: */
221 for (kse = TAILQ_FIRST(&active_kseq); kse != NULL; kse = kse_next) {
222 kse_next = TAILQ_NEXT(kse, k_qe);
230 while ((kse = TAILQ_FIRST(&active_kseq)) != NULL) {
223 TAILQ_REMOVE(&active_kseq, kse, k_qe);
224 for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
225 _lockuser_destroy(&kse->k_lockusers[i]);
226 }
235 _lock_destroy(&kse->k_lock);
227 if (kse->k_stack.ss_sp != NULL)
228 free(kse->k_stack.ss_sp);
229 _lock_destroy(&kse->k_lock);
230 free(kse);
231 }
232 active_kse_count = 0;
233
234 /* Free the free KSEGs: */
235 while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
236 TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
237 _lock_destroy(&kseg->kg_lock);
238 _pq_free(&kseg->kg_schedq.sq_runq);
239 free(kseg);
240 }
241 free_kseg_count = 0;
242
243 /* Free the active KSEGs: */
244 for (kseg = TAILQ_FIRST(&active_kse_groupq);
245 kseg != NULL; kseg = kseg_next) {
246 kseg_next = TAILQ_NEXT(kseg, kg_qe);
252 while ((kseg = TAILQ_FIRST(&active_kse_groupq)) != NULL) {
247 TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
248 _lock_destroy(&kseg->kg_lock);
249 _pq_free(&kseg->kg_schedq.sq_runq);
250 free(kseg);
251 }
252 active_kseg_count = 0;
253
254 /* Free the free threads. */

--- 141 unchanged lines hidden ---

396
397 if (curkse->k_mbx.km_curthread != NULL)
398 PANIC("kse_lock_wait does not disable upcall.\n");
399 /*
400 * Enter a loop to wait until we get the lock.
401 */
402 ts.tv_sec = 0;
403 ts.tv_nsec = 1000000; /* 1 ms */
404 while (_LCK_BUSY(lu)) {
410 while (!_LCK_GRANTED(lu)) {
405 /*
406 * Yield the kse and wait to be notified when the lock
407 * is granted.
408 */
409 saved_flags = curkse->k_mbx.km_flags;
410 curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED;
411 kse_release(&ts);
412 curkse->k_mbx.km_flags = saved_flags;

--- 35 unchanged lines hidden ---

448{
449 struct pthread *curthread = (struct pthread *)lu->lu_private;
450
451 do {
452 THR_SCHED_LOCK(curthread, curthread);
453 THR_SET_STATE(curthread, PS_LOCKWAIT);
454 THR_SCHED_UNLOCK(curthread, curthread);
455 _thr_sched_switch(curthread);
456 } while (_LCK_BUSY(lu));
462 } while (!_LCK_GRANTED(lu));
457}
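
Note: both wait loops above changed their predicate from _LCK_BUSY (spin
while anyone holds the lock) to !_LCK_GRANTED (spin until this waiter has
explicitly been handed the lock), presumably so a waiter cannot proceed
just because it observed the lock momentarily free.  A minimal sketch of
the handoff idea; the structure and field here are hypothetical, not the
library's real lockuser:

	struct lockuser_sketch {
		volatile int	lu_granted;	/* hypothetical grant flag */
	};

	static void
	lock_wait_sketch(struct lockuser_sketch *lu)
	{
		/* Wait for our own grant, not for a global busy bit. */
		while (!lu->lu_granted)
			;	/* a real waiter blocks or yields here */
	}

	static void
	lock_handoff_sketch(struct lockuser_sketch *lu)
	{
		lu->lu_granted = 1;	/* wake exactly this waiter */
	}
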
458
459void
460_thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
461{
462 struct pthread *thread;
463 struct pthread *curthread;
464

--- 222 unchanged lines hidden ---

687
688 /*
689 * This has to do the job of kse_switchout_thread(), only
690 * for a single threaded KSE/KSEG.
691 */
692
693 switch (curthread->state) {
694 case PS_DEAD:
695 /* Unlock the scheduling queue and exit the KSE. */
701 /* Unlock the scheduling queue and exit the KSE and thread. */
702 thr_cleanup(curkse, curthread);
696 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
697 kse_fini(curkse); /* does not return */
698 break;
699
700 case PS_COND_WAIT:
701 case PS_SLEEP_WAIT:
702 /* Only insert threads that can timeout: */
703 if (curthread->wakeup_time.tv_sec != -1) {
704 /* Insert into the waiting queue: */
705 KSE_WAITQ_INSERT(curkse, curthread);
706 }
707 break;
708
709 case PS_LOCKWAIT:
710 level = curthread->locklevel - 1;
711 if (_LCK_BUSY(&curthread->lockusers[level]))
717 if (!_LCK_GRANTED(&curthread->lockusers[level]))
712 KSE_WAITQ_INSERT(curkse, curthread);
713 else
714 THR_SET_STATE(curthread, PS_RUNNING);
715 break;
716
717 case PS_JOIN:
718 case PS_MUTEX_WAIT:
719 case PS_RUNNING:

--- 90 unchanged lines hidden ---

810
811 /* Lock the scheduling lock. */
812 curthread = curkse->k_curthread;
813 if ((curthread == NULL) || (curthread->need_switchout == 0)) {
814 /* This is an upcall; take the scheduler lock. */
815 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
816 }
817
824 if (KSE_IS_IDLE(curkse)) {
825 KSE_CLEAR_IDLE(curkse);
826 curkse->k_kseg->kg_idle_kses--;
827 }
818 /*
819 * If the current thread was completed in another KSE, then
820 * it will be in the run queue. Don't mark it as being blocked.
821 */
822 if ((curthread != NULL) &&
823 ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) &&
824 (curthread->need_switchout == 0)) {
825 /*

--- 80 unchanged lines hidden ---

906 * are assigned to this KSE[G]. For instance, if a scope
907 * system thread were to create a scope process thread
908 * and this kse[g] is the initial kse[g], then that newly
909 * created thread would be assigned to us (the initial
910 * kse[g]).
911 */
912 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
913 kse_fini(curkse);
914 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
915 curthread = KSE_RUNQ_FIRST(curkse);
924 /* never returns */
916 }
917
918 THR_ASSERT(curthread != NULL,
919 "Return from kse_wait/fini without thread.");
920 THR_ASSERT(curthread->state != PS_DEAD,
921 "Trying to resume dead thread!");
922 KSE_RUNQ_REMOVE(curkse, curthread);
923

--- 137 unchanged lines hidden ---

1061 * Clean up a thread. This must be called with the thread's KSE
1062 * scheduling lock held. The thread must be a thread from the
1063 * KSE's group.
1064 */
1065static void
1066thr_cleanup(struct kse *curkse, struct pthread *thread)
1067{
1068 struct pthread *joiner;
1078 int sys_scope;
1069
1070 if ((joiner = thread->joiner) != NULL) {
1071 thread->joiner = NULL;
1072 if ((joiner->state == PS_JOIN) &&
1073 (joiner->join_status.thread == thread)) {
1074 joiner->join_status.thread = NULL;
1075
1076 /* Set the return status for the joining thread: */
1077 joiner->join_status.ret = thread->ret;
1078
1079 /* Make the thread runnable. */
1080 if (joiner->kseg == curkse->k_kseg)
1081 /* Joinee scheduler lock held; joiner won't leave. */
1082 if (joiner->kseg == curkse->k_kseg) {
1083 if (joiner->join_status.thread == thread) {
1084 joiner->join_status.thread = NULL;
1085 joiner->join_status.ret = thread->ret;
1081 _thr_setrunnable_unlocked(joiner);
1086 _thr_setrunnable_unlocked(joiner);
1082 else {
1083 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
1087 }
1088 } else {
1089 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
1090 /* The joiner may have removed itself and exited. */
1091 if (_thr_ref_add(thread, joiner, 0) == 0) {
1084 KSE_SCHED_LOCK(curkse, joiner->kseg);
1092 KSE_SCHED_LOCK(curkse, joiner->kseg);
1085 _thr_setrunnable_unlocked(joiner);
1093 if (joiner->join_status.thread == thread) {
1094 joiner->join_status.thread = NULL;
1095 joiner->join_status.ret = thread->ret;
1096 _thr_setrunnable_unlocked(joiner);
1097 }
1086 KSE_SCHED_UNLOCK(curkse, joiner->kseg);
1098 KSE_SCHED_UNLOCK(curkse, joiner->kseg);
1087 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
1099 _thr_ref_delete(thread, joiner);
1088 }
1100 }
1101 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
1089 }
1090 thread->attr.flags |= PTHREAD_DETACHED;
1091 }
1092
1102 }
1103 thread->attr.flags |= PTHREAD_DETACHED;
1104 }
1105
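
Note: the rewritten joiner wakeup above has to drop the exiting thread's
scheduler lock before taking the joiner's, and the joiner can exit inside
that window.  The added code therefore pins the joiner with
_thr_ref_add() and re-checks join_status.thread under the joiner's own
lock.  Condensed from the added lines, with explanatory comments:

	KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
	/* Pin the joiner so its storage cannot be reclaimed meanwhile. */
	if (_thr_ref_add(thread, joiner, 0) == 0) {
		KSE_SCHED_LOCK(curkse, joiner->kseg);
		if (joiner->join_status.thread == thread) {
			/* Still joining us: post the result and wake it. */
			joiner->join_status.thread = NULL;
			joiner->join_status.ret = thread->ret;
			_thr_setrunnable_unlocked(joiner);
		}
		KSE_SCHED_UNLOCK(curkse, joiner->kseg);
		_thr_ref_delete(thread, joiner);	/* unpin */
	}
	KSE_SCHED_LOCK(curkse, curkse->k_kseg);
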
1093 if ((thread->attr.flags & PTHREAD_SCOPE_PROCESS) == 0) {
1106 if (!(sys_scope = (thread->attr.flags & PTHREAD_SCOPE_SYSTEM))) {
1094 /*
1095 * Remove the thread from the KSEG's list of threads.
1096 */
1097 KSEG_THRQ_REMOVE(thread->kseg, thread);
1098 /*
1099 * Migrate the thread to the main KSE so that this
1100 * KSE and KSEG can be cleaned when their last thread
1101 * exits.

--- 6 unchanged lines hidden ---

1108 /*
1109 * We can't hold the thread list lock while holding the
1110 * scheduler lock.
1111 */
1112 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
1113 DBG_MSG("Adding thread %p to GC list\n", thread);
1114 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
1115 THR_GCLIST_ADD(thread);
1129 /* Use thread_list_lock */
1130 active_threads--;
1131 if (active_threads == 0) {
1132 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
1133 exit(0);
1134 }
1116 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
1136 if (sys_scope) {
1137 /*
1138 * System scope thread is single thread group,
1139 * when thread is exited, its kse and ksegrp should
1140 * be recycled as well.
1141 */
1142 kse_exit();
1143 PANIC("kse_exit() failed for system scope thread");
1144 }
1117 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
1118}
1119
1120void
1121_thr_gc(struct pthread *curthread)
1122{
1123 struct pthread *td, *td_next;
1124 kse_critical_t crit;
1125 TAILQ_HEAD(, pthread) worklist;
1126
1127 TAILQ_INIT(&worklist);
1128 crit = _kse_critical_enter();
1129 KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
1130
1131 /* Check the threads waiting for GC. */
1132 for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
1133 td_next = TAILQ_NEXT(td, gcle);
1134 if ((td->flags & THR_FLAGS_GC_SAFE) == 0)
1135 continue;
1136#ifdef NOT_YET
1137 else if (((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) &&
1138 (td->kse->k_mbx.km_flags == 0)) {
1164 else if (((td->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
1165 ((td->kse->k_mbx.km_flags & KMF_DONE) == 0)) {
1139 /*
1140 * The thread and KSE are operating on the same
1141 * stack. Wait for the KSE to exit before freeing
1142 * the thread's stack as well as everything else.
1143 */
1144 continue;
1145 }
1146#endif
1147 /*
1148 * Remove the thread from the GC list. If the thread
1149 * isn't yet detached, it will get added back to the
1150 * GC list at a later time.
1151 */
1152 THR_GCLIST_REMOVE(td);
1153 DBG_MSG("Freeing thread %p stack\n", td);
1154 /*

--- 13 unchanged lines hidden ---

1168 }
1169 }
1170 KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
1171 _kse_critical_leave(crit);
1172
1173 while ((td = TAILQ_FIRST(&worklist)) != NULL) {
1174 TAILQ_REMOVE(&worklist, td, gcle);
1175
1176 if ((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) {
1202 if ((td->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
1177 crit = _kse_critical_enter();
1178 KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
1179 kse_free_unlocked(td->kse);
1180 kseg_free_unlocked(td->kseg);
1181 KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
1182 _kse_critical_leave(crit);
1183 }
1184 DBG_MSG("Freeing thread %p\n", td);
1185 _thr_free(curthread, td);
1186 }
1213 /* XXX the free kse and ksegrp lists should be looked at as well */
1187}
1188
1189
1190/*
1191 * Only new threads that are running or suspended may be scheduled.
1192 */
1193int
1194_thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
1195{
1196 struct kse *curkse;
1197 kse_critical_t crit;
1198 int need_start;
1199 int ret;
1200
1227 /* Add the new thread. */
1228 thr_link(newthread);
1229
1201 /*
1202 * If this is the first time creating a thread, make sure
1203 * the mailbox is set for the current thread.
1204 */
1205 if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
1206#ifdef NOT_YET
1207 /* We use the thread's stack as the KSE's stack. */
1208 new_thread->kse->k_mbx.km_stack.ss_sp =
1209 new_thread->attr.stackaddr_attr;
1210 new_thread->kse->k_mbx.km_stack.ss_size =
1211 new_thread->attr.stacksize_attr;
1212#endif
1213 /*
1214 * No need to lock the scheduling queue since the
1215 * KSE/KSEG pair have not yet been started.
1216 */
1217 KSEG_THRQ_ADD(newthread->kseg, newthread);
1218 TAILQ_INSERT_TAIL(&newthread->kseg->kg_kseq, newthread->kse,
1219 k_kgqe);
1220 newthread->kseg->kg_ksecount = 1;
1221 if (newthread->state == PS_RUNNING)
1222 THR_RUNQ_INSERT_TAIL(newthread);
1223 newthread->kse->k_curthread = NULL;
1224 newthread->kse->k_mbx.km_flags = 0;
1225 newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
1226 newthread->kse->k_mbx.km_quantum = 0;
1227
1228 /*
1229 * This thread needs a new KSE and KSEG.
1230 */
1231 crit = _kse_critical_enter();
1232 curkse = _get_curkse();
1233 _ksd_setprivate(&newthread->kse->k_ksd);
1234 newthread->kse->k_flags |= KF_INITIALIZED;
1260 newthread->kse->k_flags |= KF_INITIALIZED|KF_STARTED;
1235 ret = kse_create(&newthread->kse->k_mbx, 1);
1236 if (ret != 0)
1237 ret = errno;
1238 _ksd_setprivate(&curkse->k_ksd);
1239 _kse_critical_leave(crit);
1240 }
1241 else {
1242 /*
1243 * Lock the KSE and add the new thread to its list of
1244 * assigned threads. If the new thread is runnable, also
1245 * add it to the KSE's run queue.
1246 */
1247 need_start = 0;
1248 KSE_SCHED_LOCK(curthread->kse, newthread->kseg);
1249 KSEG_THRQ_ADD(newthread->kseg, newthread);
1250 if (newthread->state == PS_RUNNING)
1251 THR_RUNQ_INSERT_TAIL(newthread);
1252 if ((newthread->kse->k_flags & KF_STARTED) == 0) {
1253 /*
1254 * This KSE hasn't been started yet. Start it
1255 * outside of holding the lock.
1256 */
1257 newthread->kse->k_flags |= KF_STARTED;
1258 newthread->kse->k_mbx.km_func =
1259 (kse_func_t *)kse_sched_multi;
1260 newthread->kse->k_mbx.km_flags = 0;
1261 need_start = 1;
1262 }
1263 KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg);
1264
1265 if (need_start != 0)
1266 kse_create(&newthread->kse->k_mbx, 0);
1286 kse_create(&newthread->kse->k_mbx, 0);
1267 else if ((newthread->state == PS_RUNNING) &&
1268 KSE_IS_IDLE(newthread->kse)) {
1287 } else if ((newthread->state == PS_RUNNING) &&
1288 KSE_IS_IDLE(newthread->kse)) {
1269 /*
1270 * The thread is being scheduled on another KSEG.
1271 */
1272 kse_wakeup_one(newthread);
1273 }
1294 KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg);
1274 ret = 0;
1275 }
1297 if (ret != 0)
1298 thr_unlink(newthread);
1299
1276 return (ret);
1277}
1278
1279void
1280kse_waitq_insert(struct pthread *thread)
1281{
1282 struct pthread *td;
1283

--- 192 unchanged lines hidden ---

1476
1477 case PS_LOCKWAIT:
1478 /*
1479 * This state doesn't time out.
1480 */
1481 thread->wakeup_time.tv_sec = -1;
1482 thread->wakeup_time.tv_nsec = -1;
1483 level = thread->locklevel - 1;
1484 if (_LCK_BUSY(&thread->lockusers[level]))
1508 if (!_LCK_GRANTED(&thread->lockusers[level]))
1485 KSE_WAITQ_INSERT(kse, thread);
1486 else
1487 THR_SET_STATE(thread, PS_RUNNING);
1488 break;
1489
1490 case PS_JOIN:
1491 case PS_MUTEX_WAIT:
1492 case PS_SIGSUSPEND:

--- 103 unchanged lines hidden ---

1596
1597/*
1598 * Avoid calling this kse_exit() so as not to confuse it with the
1599 * system call of the same name.
1600 */
1601static void
1602kse_fini(struct kse *kse)
1603{
1628 /* struct kse_group *free_kseg = NULL; */
1604 struct timespec ts;
1605 struct kse_group *free_kseg = NULL;
1606
1607 if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0)
1608 kse_exit();
1609 /*
1610 * Check to see if this is one of the main kses.
1611 */
1612 else if (kse->k_kseg != _kse_initial->k_kseg) {
1634 if (kse->k_kseg != _kse_initial->k_kseg) {
1635 PANIC("shouldn't get here");
1636 /* This is for supporting thread groups. */
1637#ifdef NOT_YET
1613 /* Remove this KSE from the KSEG's list of KSEs. */
1614 KSE_SCHED_LOCK(kse, kse->k_kseg);
1615 TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
1616 kse->k_kseg->kg_ksecount--;
1617 if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq))
1618 free_kseg = kse->k_kseg;
1619 KSE_SCHED_UNLOCK(kse, kse->k_kseg);
1620
1621 /*
1622 * Add this KSE to the list of free KSEs along with
1623 * the KSEG if it is now orphaned.
1624 */
1625#ifdef NOT_YET
1626 KSE_LOCK_ACQUIRE(kse, &kse_lock);
1627 if (free_kseg != NULL)
1628 kseg_free_unlocked(free_kseg);
1629 kse_free_unlocked(kse);
1630 KSE_LOCK_RELEASE(kse, &kse_lock);
1631#endif
1632 kse_exit();
1633 /* Never returns. */
1657 PANIC("kse_exit()");
1658#endif
1634 } else {
1660#ifdef NOT_YET
1635 /*
1636 * Wait for the last KSE/thread to exit, or for more
1637 * threads to be created (it is possible for additional
1638 * scope process threads to be created after the main
1639 * thread exits).
1662 * In the future, we might allow the program to kill
1663 * KSEs in the initial group.
1640 */
1665 if (kse != _kse_initial) {
1666 KSE_SCHED_LOCK(kse, kse->k_kseg);
1667 TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
1668 kse->k_kseg->kg_ksecount--;
1669 KSE_SCHED_UNLOCK(kse, kse->k_kseg);
1670 KSE_LOCK_ACQUIRE(kse, &kse_lock);
1671 kse_free_unlocked(kse);
1672 KSE_LOCK_RELEASE(kse, &kse_lock);
1673 kse_exit();
1674 /* Never returns. */
1675 PANIC("kse_exit() failed for initial kseg");
1676 }
1677#endif
1678 KSE_SCHED_LOCK(kse, kse->k_kseg);
1679 KSE_SET_IDLE(kse);
1680 kse->k_kseg->kg_idle_kses++;
1681 KSE_SCHED_UNLOCK(kse, kse->k_kseg);
1641 ts.tv_sec = 120;
1642 ts.tv_nsec = 0;
1643 KSE_SET_WAIT(kse);
1644 KSE_SCHED_LOCK(kse, kse->k_kseg);
1645 if ((active_kse_count > 1) &&
1646 (kse->k_kseg->kg_threadcount == 0)) {
1647 KSE_SCHED_UNLOCK(kse, kse->k_kseg);
1648 kse_release(&ts);
1649 /* The above never returns. */
1650 }
1651 else
1652 KSE_SCHED_UNLOCK(kse, kse->k_kseg);
1653
1654 /* There are no more threads; exit this process: */
1655 if (kse->k_kseg->kg_threadcount == 0) {
1656 /* kse_exit(); */
1657 __isthreaded = 0;
1658 exit(0);
1659 }
1684 kse->k_mbx.km_flags = 0;
1685 kse_release(&ts);
1686 /* Never reached. */
1660 }
1661}
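
Note: kse_fini() no longer exits the process itself when the initial
group runs out of threads; the KSE now marks itself idle and parks in
kse_release() with a two-minute timeout, and process exit moved to the
active_threads accounting in thr_cleanup().  The parking step, condensed
from the added lines with explanatory comments:

	struct timespec ts;

	KSE_SCHED_LOCK(kse, kse->k_kseg);
	KSE_SET_IDLE(kse);		/* visible to kse_wakeup_one() */
	kse->k_kseg->kg_idle_kses++;
	KSE_SCHED_UNLOCK(kse, kse->k_kseg);
	ts.tv_sec = 120;		/* bounded nap between upcalls */
	ts.tv_nsec = 0;
	kse->k_mbx.km_flags = 0;
	kse_release(&ts);		/* comes back via a fresh upcall */
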
1662
1663void
1664_thr_set_timeout(const struct timespec *timeout)
1665{
1666 struct pthread *curthread = _get_curthread();
1667 struct timespec ts;

--- 243 unchanged lines hidden ---

1911 int need_ksd = 0;
1912 int i;
1913
1914 if ((curthread != NULL) && (free_kse_count > 0)) {
1915 crit = _kse_critical_enter();
1916 KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
1917 /* Search for a finished KSE. */
1918 kse = TAILQ_FIRST(&free_kseq);
1919#ifdef NOT_YET
1920#define KEMBX_DONE 0x04
1921 while ((kse != NULL) &&
1922 ((kse->k_mbx.km_flags & KEMBX_DONE) == 0)) {
1947 ((kse->k_mbx.km_flags & KMF_DONE) == 0)) {
1923 kse = TAILQ_NEXT(kse, k_qe);
1924 }
1925#undef KEMBX_DONE
1926#endif
1927 if (kse != NULL) {
1951 DBG_MSG("found an unused kse.\n");
1928 TAILQ_REMOVE(&free_kseq, kse, k_qe);
1929 free_kse_count--;
1930 TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
1931 active_kse_count++;
1932 }
1933 KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
1934 _kse_critical_leave(crit);
1935 if (kse != NULL)

--- 71 unchanged lines hidden ---

2007 }
2008 }
2009 return (kse);
2010}
2011
2012static void
2013kse_reinit(struct kse *kse)
2014{
2015 bzero(&kse->k_mbx, sizeof(struct kse_mailbox));
2039 /*
2040 * XXX - For now every kse has its own stack.
2041 * In the future, we may want to have it done
2042 * outside the allocation so that scope system
2043 * threads (one thread per KSE) are not required
2044 * to have a stack for an unneeded kse upcall.
2045 */
2046 kse->k_mbx.km_flags = 0;
2016 kse->k_curthread = 0;
2017 kse->k_kseg = 0;
2018 kse->k_schedq = 0;
2019 kse->k_locklevel = 0;
2020 sigemptyset(&kse->k_sigmask);
2021 bzero(&kse->k_sigq, sizeof(kse->k_sigq));
2022 kse->k_check_sigq = 0;
2023 kse->k_flags = 0;
2024 kse->k_waiting = 0;
2056 kse->k_idle = 0;
2025 kse->k_error = 0;
2026 kse->k_cpu = 0;
2027 kse->k_done = 0;
2028}
2029
2030void
2031kse_free_unlocked(struct kse *kse)
2032{
2033 TAILQ_REMOVE(&active_kseq, kse, k_qe);
2034 active_kse_count--;
2035 kse->k_kseg = NULL;
2036 kse->k_mbx.km_quantum = 20000;
2037 kse->k_flags &= ~KF_INITIALIZED;
2069 kse->k_flags = 0;
2038 TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe);
2039 free_kse_count++;
2040}
2041
2042void
2043_kse_free(struct pthread *curthread, struct kse *kse)
2044{
2045 kse_critical_t crit;

--- 78 unchanged lines hidden ---

2124 crit = _kse_critical_enter();
2125 KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
2126 TAILQ_INSERT_HEAD(&free_threadq, thread, tle);
2127 free_thread_count++;
2128 KSE_LOCK_RELEASE(curthread->kse, &thread_lock);
2129 _kse_critical_leave(crit);
2130 }
2131}
2164
2165/*
2166 * Add an active thread:
2167 *
2168 * o Assign the thread a unique id (which GDB uses to track
2169 * threads).
2170 * o Add the thread to the list of all threads and increment
2171 * the number of active threads.
2172 */
2173static void
2174thr_link(struct pthread *thread)
2175{
2176 kse_critical_t crit;
2177 struct kse *curkse;
2178
2179 crit = _kse_critical_enter();
2180 curkse = _get_curkse();
2181
2182 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
2183 /*
2184 * Initialize the unique id (which GDB uses to track
2185 * threads), add the thread to the list of all threads,
2186 * and increment the number of active threads.
2187 */
2188 thread->uniqueid = next_uniqueid++;
2189 THR_LIST_ADD(thread);
2190 active_threads++;
2191 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
2192
2193 _kse_critical_leave(crit);
2194}
2195
2196/*
2197 * Remove an active thread.
2198 */
2199static void
2200thr_unlink(struct pthread *thread)
2201{
2202 kse_critical_t crit;
2203 struct kse *curkse;
2204
2205 crit = _kse_critical_enter();
2206 curkse = _get_curkse();
2207
2208 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
2209 THR_LIST_REMOVE(thread);
2210 active_threads--;
2211 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
2212
2213 _kse_critical_leave(crit);
2214}
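
Note: thr_link() and thr_unlink() centralize bookkeeping that used to be
open-coded: each new thread gets a GDB-visible uniqueid and bumps
active_threads, and _thr_schedule_add() now rolls that back on failure.
A condensed sketch of the create-path usage implied by this diff; the
middle helper is hypothetical:

	static int setup_kse_sketch(struct pthread *);	/* hypothetical */

	int
	schedule_add_sketch(struct pthread *curthread,
	    struct pthread *newthread)
	{
		int ret;

		thr_link(newthread);	/* uniqueid, active_threads++ */
		ret = setup_kse_sketch(newthread);
		if (ret != 0)
			thr_unlink(newthread);	/* undo the accounting */
		return (ret);
	}

Together with the active_threads decrement and exit(0) added to
thr_cleanup(), this makes the process terminate when its last thread
exits.
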