Deleted Added
sdiff udiff text old ( 115173 ) new ( 115278 )
full compact
1/*
2 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
3 * Copyright (C) 2002 Jonathon Mini <mini@freebsd.org>
4 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 19 unchanged lines hidden (view full) ---

28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 */
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/lib/libkse/thread/thr_kern.c 115173 2003-05-19 23:04:50Z deischen $");
37
38#include <sys/types.h>
39#include <sys/kse.h>
40#include <sys/signalvar.h>
41#include <sys/queue.h>
42#include <machine/atomic.h>
43
44#include <assert.h>

--- 47 unchanged lines hidden (view full) ---

92#define KSE_RUNQ_INSERT_TAIL(kse, thrd) \
93 _pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd)
94#define KSE_RUNQ_REMOVE(kse, thrd) \
95 _pq_remove(&(kse)->k_schedq->sq_runq, thrd)
96#define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq)
97
98#define KSE_RUNQ_THREADS(kse) ((kse)->k_schedq->sq_runq.pq_threads)
99
100/*
101 * We've got to keep track of everything that is allocated, not only
102 * to have a speedy free list, but also so they can be deallocated
103 * after a fork().
104 */
105static TAILQ_HEAD(, kse) active_kseq;
106static TAILQ_HEAD(, kse) free_kseq;
107static TAILQ_HEAD(, kse_group) free_kse_groupq;
108static TAILQ_HEAD(, kse_group) active_kse_groupq;
109static TAILQ_HEAD(, kse_group) gc_ksegq;
110static struct lock kse_lock; /* also used for kseg queue */
111static int free_kse_count = 0;
112static int free_kseg_count = 0;
113static TAILQ_HEAD(, pthread) free_threadq;
114static struct lock thread_lock;
115static int free_thread_count = 0;
116static int inited = 0;
117static int active_kse_count = 0;
118static int active_kseg_count = 0;
119
120#ifdef DEBUG_THREAD_KERN
121static void dump_queues(struct kse *curkse);
122#endif
123static void kse_check_completed(struct kse *kse);
124static void kse_check_waitq(struct kse *kse);
125static void kse_check_signals(struct kse *kse);
126static void kse_fini(struct kse *curkse);
127static void kse_reinit(struct kse *kse);

--- 6 unchanged lines hidden (view full) ---

134static void kse_free_unlocked(struct kse *kse);
135static void kseg_free_unlocked(struct kse_group *kseg);
136static void kseg_init(struct kse_group *kseg);
137static void kseg_reinit(struct kse_group *kseg);
138static void kse_waitq_insert(struct pthread *thread);
139static void kse_wakeup_multi(struct kse *curkse);
140static void kse_wakeup_one(struct pthread *thread);
141static void thr_cleanup(struct kse *kse, struct pthread *curthread);
142static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2,
143 ucontext_t *ucp);
144static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
145 struct pthread_sigframe *psf);
146static int thr_timedout(struct pthread *thread, struct timespec *curtime);
147
148/*
149 * This is called after a fork().
150 * No locks need to be taken here since we are guaranteed to be
151 * single threaded.
152 */
153void
154_kse_single_thread(struct pthread *curthread)
155{
156 struct kse *kse, *kse_next;
157 struct kse_group *kseg, *kseg_next;
158 struct pthread *thread, *thread_next;
159 kse_critical_t crit;
160 int i;
161
162 /*
163 * Disable upcalls and clear the threaded flag.
164 * XXX - I don't think we need to disable upcalls after a fork().
165 * but it doesn't hurt.
166 */
167 crit = _kse_critical_enter();
168 __isthreaded = 0;
169
170 /*
171 * Enter a loop to remove and free all threads other than
172 * the running thread from the active thread list:
173 */
174 for (thread = TAILQ_FIRST(&_thread_list); thread != NULL;
175 thread = thread_next) {
176 /*
177 * Advance to the next thread before the destroying
178 * the current thread.
179 */
180 thread_next = TAILQ_NEXT(thread, tle);
181
182 /*
183 * Remove this thread from the list (the current
184 * thread will be removed but re-added by libpthread
185 * initialization.
186 */
187 TAILQ_REMOVE(&_thread_list, thread, tle);
188 /* Make sure this isn't the running thread: */
189 if (thread != curthread) {
190 _thr_stack_free(&thread->attr);

--- 4 unchanged lines hidden (view full) ---

195 }
196 _lock_destroy(&thread->lock);
197 free(thread);
198 }
199 }
200
201 TAILQ_INIT(&curthread->mutexq); /* initialize mutex queue */
202 curthread->joiner = NULL; /* no joining threads yet */
203 sigemptyset(&curthread->sigpend); /* clear pending signals */
204 if (curthread->specific != NULL) {
205 free(curthread->specific);
206 curthread->specific = NULL;
207 curthread->specific_data_count = 0;
208 }
209
210 /* Free the free KSEs: */
211 while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) {
212 TAILQ_REMOVE(&free_kseq, kse, k_qe);
213 _ksd_destroy(&kse->k_ksd);
214 if (kse->k_stack.ss_sp != NULL)
215 free(kse->k_stack.ss_sp);
216 free(kse);
217 }
218 free_kse_count = 0;
219
220 /* Free the active KSEs: */
221 for (kse = TAILQ_FIRST(&active_kseq); kse != NULL; kse = kse_next) {
222 kse_next = TAILQ_NEXT(kse, k_qe);
223 TAILQ_REMOVE(&active_kseq, kse, k_qe);
224 for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
225 _lockuser_destroy(&kse->k_lockusers[i]);
226 }
227 if (kse->k_stack.ss_sp != NULL)
228 free(kse->k_stack.ss_sp);
229 _lock_destroy(&kse->k_lock);
230 free(kse);
231 }
232 active_kse_count = 0;
233
234 /* Free the free KSEGs: */
235 while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
236 TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
237 _lock_destroy(&kseg->kg_lock);
238 _pq_free(&kseg->kg_schedq.sq_runq);
239 free(kseg);
240 }
241 free_kseg_count = 0;
242
243 /* Free the active KSEGs: */
244 for (kseg = TAILQ_FIRST(&active_kse_groupq);
245 kseg != NULL; kseg = kseg_next) {
246 kseg_next = TAILQ_NEXT(kseg, kg_qe);
247 TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
248 _lock_destroy(&kseg->kg_lock);
249 _pq_free(&kseg->kg_schedq.sq_runq);
250 free(kseg);
251 }
252 active_kseg_count = 0;
253
254 /* Free the free threads. */

--- 141 unchanged lines hidden (view full) ---

396
397 if (curkse->k_mbx.km_curthread != NULL)
398 PANIC("kse_lock_wait does not disable upcall.\n");
399 /*
400 * Enter a loop to wait until we get the lock.
401 */
402 ts.tv_sec = 0;
403 ts.tv_nsec = 1000000; /* 1 sec */
404 while (_LCK_BUSY(lu)) {
405 /*
406 * Yield the kse and wait to be notified when the lock
407 * is granted.
408 */
409 saved_flags = curkse->k_mbx.km_flags;
410 curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED;
411 kse_release(&ts);
412 curkse->k_mbx.km_flags = saved_flags;

--- 35 unchanged lines hidden (view full) ---

448{
449 struct pthread *curthread = (struct pthread *)lu->lu_private;
450
451 do {
452 THR_SCHED_LOCK(curthread, curthread);
453 THR_SET_STATE(curthread, PS_LOCKWAIT);
454 THR_SCHED_UNLOCK(curthread, curthread);
455 _thr_sched_switch(curthread);
456 } while _LCK_BUSY(lu);
457}
458
459void
460_thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
461{
462 struct pthread *thread;
463 struct pthread *curthread;
464

--- 222 unchanged lines hidden (view full) ---

687
688 /*
689 * This has to do the job of kse_switchout_thread(), only
690 * for a single threaded KSE/KSEG.
691 */
692
693 switch (curthread->state) {
694 case PS_DEAD:
695 /* Unlock the scheduling queue and exit the KSE. */
696 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
697 kse_fini(curkse); /* does not return */
698 break;
699
700 case PS_COND_WAIT:
701 case PS_SLEEP_WAIT:
702 /* Only insert threads that can timeout: */
703 if (curthread->wakeup_time.tv_sec != -1) {
704 /* Insert into the waiting queue: */
705 KSE_WAITQ_INSERT(curkse, curthread);
706 }
707 break;
708
709 case PS_LOCKWAIT:
710 level = curthread->locklevel - 1;
711 if (_LCK_BUSY(&curthread->lockusers[level]))
712 KSE_WAITQ_INSERT(curkse, curthread);
713 else
714 THR_SET_STATE(curthread, PS_RUNNING);
715 break;
716
717 case PS_JOIN:
718 case PS_MUTEX_WAIT:
719 case PS_RUNNING:

--- 90 unchanged lines hidden (view full) ---

810
811 /* Lock the scheduling lock. */
812 curthread = curkse->k_curthread;
813 if ((curthread == NULL) || (curthread->need_switchout == 0)) {
814 /* This is an upcall; take the scheduler lock. */
815 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
816 }
817
818 /*
819 * If the current thread was completed in another KSE, then
820 * it will be in the run queue. Don't mark it as being blocked.
821 */
822 if ((curthread != NULL) &&
823 ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) &&
824 (curthread->need_switchout == 0)) {
825 /*

--- 80 unchanged lines hidden (view full) ---

906 * are assigned to this KSE[G]. For instance, if a scope
907 * system thread were to create a scope process thread
908 * and this kse[g] is the initial kse[g], then that newly
909 * created thread would be assigned to us (the initial
910 * kse[g]).
911 */
912 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
913 kse_fini(curkse);
914 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
915 curthread = KSE_RUNQ_FIRST(curkse);
916 }
917
918 THR_ASSERT(curthread != NULL,
919 "Return from kse_wait/fini without thread.");
920 THR_ASSERT(curthread->state != PS_DEAD,
921 "Trying to resume dead thread!");
922 KSE_RUNQ_REMOVE(curkse, curthread);
923

--- 137 unchanged lines hidden (view full) ---

1061 * Clean up a thread. This must be called with the thread's KSE
1062 * scheduling lock held. The thread must be a thread from the
1063 * KSE's group.
1064 */
1065static void
1066thr_cleanup(struct kse *curkse, struct pthread *thread)
1067{
1068 struct pthread *joiner;
1069
1070 if ((joiner = thread->joiner) != NULL) {
1071 thread->joiner = NULL;
1072 if ((joiner->state == PS_JOIN) &&
1073 (joiner->join_status.thread == thread)) {
1074 joiner->join_status.thread = NULL;
1075
1076 /* Set the return status for the joining thread: */
1077 joiner->join_status.ret = thread->ret;
1078
1079 /* Make the thread runnable. */
1080 if (joiner->kseg == curkse->k_kseg)
1081 _thr_setrunnable_unlocked(joiner);
1082 else {
1083 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
1084 KSE_SCHED_LOCK(curkse, joiner->kseg);
1085 _thr_setrunnable_unlocked(joiner);
1086 KSE_SCHED_UNLOCK(curkse, joiner->kseg);
1087 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
1088 }
1089 }
1090 thread->attr.flags |= PTHREAD_DETACHED;
1091 }
1092
1093 if ((thread->attr.flags & PTHREAD_SCOPE_PROCESS) == 0) {
1094 /*
1095 * Remove the thread from the KSEG's list of threads.
1096 */
1097 KSEG_THRQ_REMOVE(thread->kseg, thread);
1098 /*
1099 * Migrate the thread to the main KSE so that this
1100 * KSE and KSEG can be cleaned when their last thread
1101 * exits.

--- 6 unchanged lines hidden (view full) ---

1108 /*
1109 * We can't hold the thread list lock while holding the
1110 * scheduler lock.
1111 */
1112 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
1113 DBG_MSG("Adding thread %p to GC list\n", thread);
1114 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
1115 THR_GCLIST_ADD(thread);
1116 KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
1117 KSE_SCHED_LOCK(curkse, curkse->k_kseg);
1118}
1119
1120void
1121_thr_gc(struct pthread *curthread)
1122{
1123 struct pthread *td, *td_next;
1124 kse_critical_t crit;
1125 TAILQ_HEAD(, pthread) worklist;
1126
1127 TAILQ_INIT(&worklist);
1128 crit = _kse_critical_enter();
1129 KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock);
1130
1131 /* Check the threads waiting for GC. */
1132 for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
1133 td_next = TAILQ_NEXT(td, gcle);
1134 if ((td->flags & THR_FLAGS_GC_SAFE) == 0)
1135 continue;
1136#ifdef NOT_YET
1137 else if (((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) &&
1138 (td->kse->k_mbx.km_flags == 0)) {
1139 /*
1140 * The thread and KSE are operating on the same
1141 * stack. Wait for the KSE to exit before freeing
1142 * the thread's stack as well as everything else.
1143 */
1144 continue;
1145 }
1146#endif
1147 /*
1148 * Remove the thread from the GC list. If the thread
1149 * isn't yet detached, it will get added back to the
1150 * GC list at a later time.
1151 */
1152 THR_GCLIST_REMOVE(td);
1153 DBG_MSG("Freeing thread %p stack\n", td);
1154 /*

--- 13 unchanged lines hidden (view full) ---

1168 }
1169 }
1170 KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock);
1171 _kse_critical_leave(crit);
1172
1173 while ((td = TAILQ_FIRST(&worklist)) != NULL) {
1174 TAILQ_REMOVE(&worklist, td, gcle);
1175
1176 if ((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) {
1177 crit = _kse_critical_enter();
1178 KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
1179 kse_free_unlocked(td->kse);
1180 kseg_free_unlocked(td->kseg);
1181 KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
1182 _kse_critical_leave(crit);
1183 }
1184 DBG_MSG("Freeing thread %p\n", td);
1185 _thr_free(curthread, td);
1186 }
1187}
1188
1189
/*
 * Only new threads that are running or suspended may be scheduled.
 *
 * Places a newly created thread under a KSE/KSEG so it can run.
 * Scope-system threads get their own dedicated KSE/KSEG pair (started
 * here via kse_create()); scope-process threads are added to an
 * existing KSEG's thread list and run queue.
 *
 * Returns 0 on success, or an errno value if kse_create() fails for a
 * scope-system thread.
 */
int
_thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
{
	struct kse *curkse;
	kse_critical_t crit;
	int need_start;
	int ret;

	/*
	 * If this is the first time creating a thread, make sure
	 * the mailbox is set for the current thread.
	 */
	if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
#ifdef NOT_YET
		/* We use the thread's stack as the KSE's stack. */
		/*
		 * NOTE(review): this disabled code spells the parameter
		 * "new_thread" while the actual parameter is "newthread";
		 * it will not compile if NOT_YET is ever defined.
		 */
		new_thread->kse->k_mbx.km_stack.ss_sp =
		    new_thread->attr.stackaddr_attr;
		new_thread->kse->k_mbx.km_stack.ss_size =
		    new_thread->attr.stacksize_attr;
#endif
		/*
		 * No need to lock the scheduling queue since the
		 * KSE/KSEG pair have not yet been started.
		 */
		KSEG_THRQ_ADD(newthread->kseg, newthread);
		/* This KSEG owns exactly one KSE: the thread's own. */
		TAILQ_INSERT_TAIL(&newthread->kseg->kg_kseq, newthread->kse,
		    k_kgqe);
		newthread->kseg->kg_ksecount = 1;
		if (newthread->state == PS_RUNNING)
			THR_RUNQ_INSERT_TAIL(newthread);
		/* Prime the KSE's kernel mailbox before starting it. */
		newthread->kse->k_curthread = NULL;
		newthread->kse->k_mbx.km_flags = 0;
		newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
		newthread->kse->k_mbx.km_quantum = 0;

		/*
		 * This thread needs a new KSE and KSEG.
		 */
		crit = _kse_critical_enter();
		curkse = _get_curkse();
		/*
		 * Temporarily install the new KSE's KSD so kse_create()
		 * operates on it, then restore the current KSE's KSD.
		 */
		_ksd_setprivate(&newthread->kse->k_ksd);
		newthread->kse->k_flags |= KF_INITIALIZED;
		ret = kse_create(&newthread->kse->k_mbx, 1);
		if (ret != 0)
			ret = errno;
		_ksd_setprivate(&curkse->k_ksd);
		_kse_critical_leave(crit);
	}
	else {
		/*
		 * Lock the KSE and add the new thread to its list of
		 * assigned threads. If the new thread is runnable, also
		 * add it to the KSE's run queue.
		 */
		need_start = 0;
		KSE_SCHED_LOCK(curthread->kse, newthread->kseg);
		KSEG_THRQ_ADD(newthread->kseg, newthread);
		if (newthread->state == PS_RUNNING)
			THR_RUNQ_INSERT_TAIL(newthread);
		if ((newthread->kse->k_flags & KF_STARTED) == 0) {
			/*
			 * This KSE hasn't been started yet. Start it
			 * outside of holding the lock.
			 */
			newthread->kse->k_flags |= KF_STARTED;
			newthread->kse->k_mbx.km_func =
			    (kse_func_t *)kse_sched_multi;
			newthread->kse->k_mbx.km_flags = 0;
			need_start = 1;
		}
		KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg);

		if (need_start != 0)
			kse_create(&newthread->kse->k_mbx, 0);
		else if ((newthread->state == PS_RUNNING) &&
		    KSE_IS_IDLE(newthread->kse)) {
			/*
			 * The thread is being scheduled on another KSEG.
			 */
			kse_wakeup_one(newthread);
		}
		ret = 0;
	}
	return (ret);
}
1278
1279void
1280kse_waitq_insert(struct pthread *thread)
1281{
1282 struct pthread *td;
1283

--- 192 unchanged lines hidden (view full) ---

1476
1477 case PS_LOCKWAIT:
1478 /*
1479 * This state doesn't timeout.
1480 */
1481 thread->wakeup_time.tv_sec = -1;
1482 thread->wakeup_time.tv_nsec = -1;
1483 level = thread->locklevel - 1;
1484 if (_LCK_BUSY(&thread->lockusers[level]))
1485 KSE_WAITQ_INSERT(kse, thread);
1486 else
1487 THR_SET_STATE(thread, PS_RUNNING);
1488 break;
1489
1490 case PS_JOIN:
1491 case PS_MUTEX_WAIT:
1492 case PS_SIGSUSPEND:

--- 103 unchanged lines hidden (view full) ---

1596
/*
 * Avoid calling this kse_exit() so as not to confuse it with the
 * system call of the same name.
 *
 * Tears down a KSE whose last thread has exited.  Three cases:
 *   - single-threaded (scope-system) KSE: just exit;
 *   - a non-initial KSE: unlink from its KSEG and exit;
 *   - the initial KSE: wait around in case new scope-process threads
 *     show up, and exit the whole process once no threads remain.
 */
static void
kse_fini(struct kse *kse)
{
	struct timespec ts;
	struct kse_group *free_kseg = NULL;

	/* A KSE dedicated to one scope-system thread can simply exit. */
	if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0)
		kse_exit();
	/*
	 * Check to see if this is one of the main kses.
	 */
	else if (kse->k_kseg != _kse_initial->k_kseg) {
		/* Remove this KSE from the KSEG's list of KSEs. */
		KSE_SCHED_LOCK(kse, kse->k_kseg);
		TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
		kse->k_kseg->kg_ksecount--;
		if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq))
			free_kseg = kse->k_kseg;
		KSE_SCHED_UNLOCK(kse, kse->k_kseg);

		/*
		 * Add this KSE to the list of free KSEs along with
		 * the KSEG if it is now orphaned.
		 *
		 * NOTE(review): while NOT_YET is undefined, free_kseg is
		 * computed but never used and the KSE/KSEG are not
		 * recycled onto the free lists before exiting.
		 */
#ifdef NOT_YET
		KSE_LOCK_ACQUIRE(kse, &kse_lock);
		if (free_kseg != NULL)
			kseg_free_unlocked(free_kseg);
		kse_free_unlocked(kse);
		KSE_LOCK_RELEASE(kse, &kse_lock);
#endif
		kse_exit();
		/* Never returns. */
	} else {
		/*
		 * Wait for the last KSE/thread to exit, or for more
		 * threads to be created (it is possible for additional
		 * scope process threads to be created after the main
		 * thread exits).
		 */
		ts.tv_sec = 120;
		ts.tv_nsec = 0;
		KSE_SET_WAIT(kse);
		KSE_SCHED_LOCK(kse, kse->k_kseg);
		if ((active_kse_count > 1) &&
		    (kse->k_kseg->kg_threadcount == 0)) {
			KSE_SCHED_UNLOCK(kse, kse->k_kseg);
			kse_release(&ts);
			/* The above never returns. */
		}
		else
			KSE_SCHED_UNLOCK(kse, kse->k_kseg);

		/* There are no more threads; exit this process: */
		if (kse->k_kseg->kg_threadcount == 0) {
			/* kse_exit(); */
			__isthreaded = 0;
			exit(0);
		}
	}
}
1662
1663void
1664_thr_set_timeout(const struct timespec *timeout)
1665{
1666 struct pthread *curthread = _get_curthread();
1667 struct timespec ts;

--- 243 unchanged lines hidden (view full) ---

1911 int need_ksd = 0;
1912 int i;
1913
1914 if ((curthread != NULL) && (free_kse_count > 0)) {
1915 crit = _kse_critical_enter();
1916 KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
1917 /* Search for a finished KSE. */
1918 kse = TAILQ_FIRST(&free_kseq);
1919#ifdef NOT_YET
1920#define KEMBX_DONE 0x04
1921 while ((kse != NULL) &&
1922 ((kse->k_mbx.km_flags & KEMBX_DONE) == 0)) {
1923 kse = TAILQ_NEXT(kse, k_qe);
1924 }
1925#undef KEMBX_DONE
1926#endif
1927 if (kse != NULL) {
1928 TAILQ_REMOVE(&free_kseq, kse, k_qe);
1929 free_kse_count--;
1930 TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
1931 active_kse_count++;
1932 }
1933 KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
1934 _kse_critical_leave(crit);
1935 if (kse != NULL)

--- 71 unchanged lines hidden (view full) ---

2007 }
2008 }
2009 return (kse);
2010}
2011
2012static void
2013kse_reinit(struct kse *kse)
2014{
2015 bzero(&kse->k_mbx, sizeof(struct kse_mailbox));
2016 kse->k_curthread = 0;
2017 kse->k_kseg = 0;
2018 kse->k_schedq = 0;
2019 kse->k_locklevel = 0;
2020 sigemptyset(&kse->k_sigmask);
2021 bzero(&kse->k_sigq, sizeof(kse->k_sigq));
2022 kse->k_check_sigq = 0;
2023 kse->k_flags = 0;
2024 kse->k_waiting = 0;
2025 kse->k_error = 0;
2026 kse->k_cpu = 0;
2027 kse->k_done = 0;
2028}
2029
2030void
2031kse_free_unlocked(struct kse *kse)
2032{
2033 TAILQ_REMOVE(&active_kseq, kse, k_qe);
2034 active_kse_count--;
2035 kse->k_kseg = NULL;
2036 kse->k_mbx.km_quantum = 20000;
2037 kse->k_flags &= ~KF_INITIALIZED;
2038 TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe);
2039 free_kse_count++;
2040}
2041
2042void
2043_kse_free(struct pthread *curthread, struct kse *kse)
2044{
2045 kse_critical_t crit;

--- 78 unchanged lines hidden (view full) ---

2124 crit = _kse_critical_enter();
2125 KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
2126 TAILQ_INSERT_HEAD(&free_threadq, thread, tle);
2127 free_thread_count++;
2128 KSE_LOCK_RELEASE(curthread->kse, &thread_lock);
2129 _kse_critical_leave(crit);
2130 }
2131}