1/* 2 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org> 3 * Copyright (C) 2002 Jonathon Mini <mini@freebsd.org> 4 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions --- 19 unchanged lines hidden (view full) --- 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 */ 35#include <sys/cdefs.h> |
36__FBSDID("$FreeBSD: head/lib/libkse/thread/thr_kern.c 115278 2003-05-24 02:29:25Z deischen $"); |
37 38#include <sys/types.h> 39#include <sys/kse.h> 40#include <sys/signalvar.h> 41#include <sys/queue.h> 42#include <machine/atomic.h> 43 44#include <assert.h> --- 47 unchanged lines hidden (view full) --- 92#define KSE_RUNQ_INSERT_TAIL(kse, thrd) \ 93 _pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd) 94#define KSE_RUNQ_REMOVE(kse, thrd) \ 95 _pq_remove(&(kse)->k_schedq->sq_runq, thrd) 96#define KSE_RUNQ_FIRST(kse) _pq_first(&(kse)->k_schedq->sq_runq) 97 98#define KSE_RUNQ_THREADS(kse) ((kse)->k_schedq->sq_runq.pq_threads) 99 |
100#ifndef KMF_DONE 101#define KMF_DONE 0x04 102#endif 103 |
104/* 105 * We've got to keep track of everything that is allocated, not only 106 * to have a speedy free list, but also so they can be deallocated 107 * after a fork(). 108 */ 109static TAILQ_HEAD(, kse) active_kseq; 110static TAILQ_HEAD(, kse) free_kseq; 111static TAILQ_HEAD(, kse_group) free_kse_groupq; 112static TAILQ_HEAD(, kse_group) active_kse_groupq; 113static TAILQ_HEAD(, kse_group) gc_ksegq; 114static struct lock kse_lock; /* also used for kseg queue */ 115static int free_kse_count = 0; 116static int free_kseg_count = 0; 117static TAILQ_HEAD(, pthread) free_threadq; 118static struct lock thread_lock; 119static int free_thread_count = 0; 120static int inited = 0; |
121static int active_threads = 1; |
122static int active_kse_count = 0; 123static int active_kseg_count = 0; |
124static u_int64_t next_uniqueid = 1; |
125 |
126 |
127#ifdef DEBUG_THREAD_KERN 128static void dump_queues(struct kse *curkse); 129#endif 130static void kse_check_completed(struct kse *kse); 131static void kse_check_waitq(struct kse *kse); 132static void kse_check_signals(struct kse *kse); 133static void kse_fini(struct kse *curkse); 134static void kse_reinit(struct kse *kse); --- 6 unchanged lines hidden (view full) --- 141static void kse_free_unlocked(struct kse *kse); 142static void kseg_free_unlocked(struct kse_group *kseg); 143static void kseg_init(struct kse_group *kseg); 144static void kseg_reinit(struct kse_group *kseg); 145static void kse_waitq_insert(struct pthread *thread); 146static void kse_wakeup_multi(struct kse *curkse); 147static void kse_wakeup_one(struct pthread *thread); 148static void thr_cleanup(struct kse *kse, struct pthread *curthread); |
149static void thr_link(struct pthread *thread); |
150static void thr_resume_wrapper(int unused_1, siginfo_t *unused_2, 151 ucontext_t *ucp); 152static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp, 153 struct pthread_sigframe *psf); 154static int thr_timedout(struct pthread *thread, struct timespec *curtime); |
155static void thr_unlink(struct pthread *thread); |
156 157/* 158 * This is called after a fork(). 159 * No locks need to be taken here since we are guaranteed to be 160 * single threaded. 161 */ 162void 163_kse_single_thread(struct pthread *curthread) 164{ |
165 struct kse *kse; 166 struct kse_group *kseg; 167 struct pthread *thread; |
168 kse_critical_t crit; 169 int i; 170 171 /* 172 * Disable upcalls and clear the threaded flag. 173 * XXX - I don't think we need to disable upcalls after a fork(). 174 * but it doesn't hurt. 175 */ 176 crit = _kse_critical_enter(); 177 __isthreaded = 0; |
178 active_threads = 1; |
179 180 /* 181 * Enter a loop to remove and free all threads other than 182 * the running thread from the active thread list: 183 */ |
184 while ((thread = TAILQ_FIRST(&_thread_list)) != NULL) { 185 THR_GCLIST_REMOVE(thread); |
186 /* |
187 * Remove this thread from the list (the current 188 * thread will be removed but re-added by libpthread 189 * initialization. 190 */ 191 TAILQ_REMOVE(&_thread_list, thread, tle); 192 /* Make sure this isn't the running thread: */ 193 if (thread != curthread) { 194 _thr_stack_free(&thread->attr); --- 4 unchanged lines hidden (view full) --- 199 } 200 _lock_destroy(&thread->lock); 201 free(thread); 202 } 203 } 204 205 TAILQ_INIT(&curthread->mutexq); /* initialize mutex queue */ 206 curthread->joiner = NULL; /* no joining threads yet */ |
207 curthread->refcount = 0; |
208 sigemptyset(&curthread->sigpend); /* clear pending signals */ 209 if (curthread->specific != NULL) { 210 free(curthread->specific); 211 curthread->specific = NULL; 212 curthread->specific_data_count = 0; 213 } 214 215 /* Free the free KSEs: */ 216 while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) { 217 TAILQ_REMOVE(&free_kseq, kse, k_qe); |
218 for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { 219 _lockuser_destroy(&kse->k_lockusers[i]); 220 } 221 _lock_destroy(&kse->k_lock); |
222 _ksd_destroy(&kse->k_ksd); 223 if (kse->k_stack.ss_sp != NULL) 224 free(kse->k_stack.ss_sp); 225 free(kse); 226 } 227 free_kse_count = 0; 228 229 /* Free the active KSEs: */ |
230 while ((kse = TAILQ_FIRST(&active_kseq)) != NULL) { |
231 TAILQ_REMOVE(&active_kseq, kse, k_qe); 232 for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { 233 _lockuser_destroy(&kse->k_lockusers[i]); 234 } |
235 _lock_destroy(&kse->k_lock); |
236 if (kse->k_stack.ss_sp != NULL) 237 free(kse->k_stack.ss_sp); |
238 free(kse); 239 } 240 active_kse_count = 0; 241 242 /* Free the free KSEGs: */ 243 while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) { 244 TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe); 245 _lock_destroy(&kseg->kg_lock); 246 _pq_free(&kseg->kg_schedq.sq_runq); 247 free(kseg); 248 } 249 free_kseg_count = 0; 250 251 /* Free the active KSEGs: */ |
252 while ((kseg = TAILQ_FIRST(&active_kse_groupq)) != NULL) { |
 253 TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe); 254 _lock_destroy(&kseg->kg_lock); 255 _pq_free(&kseg->kg_schedq.sq_runq); 256 free(kseg); 257 } 258 active_kseg_count = 0; 259 260 /* Free the free threads. */ --- 141 unchanged lines hidden (view full) --- 402 403 if (curkse->k_mbx.km_curthread != NULL) 404 PANIC("kse_lock_wait does not disable upcall.\n"); 405 /* 406 * Enter a loop to wait until we get the lock. 407 */ 408 ts.tv_sec = 0; 409 ts.tv_nsec = 1000000; /* 1,000,000 nsec = 1 ms per retry (old comment wrongly said 1 sec) */
410 while (!_LCK_GRANTED(lu)) { |
411 /* 412 * Yield the kse and wait to be notified when the lock 413 * is granted. 414 */ 415 saved_flags = curkse->k_mbx.km_flags; 416 curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED; 417 kse_release(&ts); 418 curkse->k_mbx.km_flags = saved_flags; --- 35 unchanged lines hidden (view full) --- 454{ 455 struct pthread *curthread = (struct pthread *)lu->lu_private; 456 457 do { 458 THR_SCHED_LOCK(curthread, curthread); 459 THR_SET_STATE(curthread, PS_LOCKWAIT); 460 THR_SCHED_UNLOCK(curthread, curthread); 461 _thr_sched_switch(curthread); |
462 } while (!_LCK_GRANTED(lu)); |
463} 464 465void 466_thr_lock_wakeup(struct lock *lock, struct lockuser *lu) 467{ 468 struct pthread *thread; 469 struct pthread *curthread; 470 --- 222 unchanged lines hidden (view full) --- 693 694 /* 695 * This has to do the job of kse_switchout_thread(), only 696 * for a single threaded KSE/KSEG. 697 */ 698 699 switch (curthread->state) { 700 case PS_DEAD: |
701 /* Unlock the scheduling queue and exit the KSE and thread. */ 702 thr_cleaup(curkse, curthread); |
703 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); |
704 break; 705 706 case PS_COND_WAIT: 707 case PS_SLEEP_WAIT: 708 /* Only insert threads that can timeout: */ 709 if (curthread->wakeup_time.tv_sec != -1) { 710 /* Insert into the waiting queue: */ 711 KSE_WAITQ_INSERT(curkse, curthread); 712 } 713 break; 714 715 case PS_LOCKWAIT: 716 level = curthread->locklevel - 1; |
717 if (!_LCK_GRANTED(&curthread->lockusers[level])) |
718 KSE_WAITQ_INSERT(curkse, curthread); 719 else 720 THR_SET_STATE(curthread, PS_RUNNING); 721 break; 722 723 case PS_JOIN: 724 case PS_MUTEX_WAIT: 725 case PS_RUNNING: --- 90 unchanged lines hidden (view full) --- 816 817 /* Lock the scheduling lock. */ 818 curthread = curkse->k_curthread; 819 if ((curthread == NULL) || (curthread->need_switchout == 0)) { 820 /* This is an upcall; take the scheduler lock. */ 821 KSE_SCHED_LOCK(curkse, curkse->k_kseg); 822 } 823 |
824 if (KSE_IS_IDLE(curkse)) { 825 KSE_CLEAR_IDLE(curkse); 826 curkse->k_kseg->kg_idle_kses--; 827 } |
828 /* 829 * If the current thread was completed in another KSE, then 830 * it will be in the run queue. Don't mark it as being blocked. 831 */ 832 if ((curthread != NULL) && 833 ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) && 834 (curthread->need_switchout == 0)) { 835 /* --- 80 unchanged lines hidden (view full) --- 916 * are assigned to this KSE[G]. For instance, if a scope 917 * system thread were to create a scope process thread 918 * and this kse[g] is the initial kse[g], then that newly 919 * created thread would be assigned to us (the initial 920 * kse[g]). 921 */ 922 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); 923 kse_fini(curkse); |
924 /* never returns */ |
925 } 926 927 THR_ASSERT(curthread != NULL, 928 "Return from kse_wait/fini without thread."); 929 THR_ASSERT(curthread->state != PS_DEAD, 930 "Trying to resume dead thread!"); 931 KSE_RUNQ_REMOVE(curkse, curthread); 932 --- 137 unchanged lines hidden (view full) --- 1070 * Clean up a thread. This must be called with the thread's KSE 1071 * scheduling lock held. The thread must be a thread from the 1072 * KSE's group. 1073 */ 1074static void 1075thr_cleanup(struct kse *curkse, struct pthread *thread) 1076{ 1077 struct pthread *joiner; |
1078 int sys_scope; |
1079 1080 if ((joiner = thread->joiner) != NULL) { |
1081 /* Joinee scheduler lock held; joiner won't leave. */ 1082 if (joiner->kseg == curkse->k_kseg) { 1083 if (joiner->join_status.thread == thread) { 1084 joiner->join_status.thread = NULL; 1085 joiner->join_status.ret = thread->ret; |
1086 _thr_setrunnable_unlocked(joiner); |
1087 } 1088 } else { 1089 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); 1090 /* The joiner may have removed itself and exited. */ 1091 if (_thr_ref_add(thread, joiner, 0) == 0) { |
1092 KSE_SCHED_LOCK(curkse, joiner->kseg); |
1093 if (joiner->join_status.thread == thread) { 1094 joiner->join_status.thread = NULL; 1095 joiner->join_status.ret = thread->ret; 1096 _thr_setrunnable_unlocked(joiner); 1097 } |
1098 KSE_SCHED_UNLOCK(curkse, joiner->kseg); |
1099 _thr_ref_delete(thread, joiner); |
1100 } |
1101 KSE_SCHED_LOCK(curkse, curkse->k_kseg); |
1102 } 1103 thread->attr.flags |= PTHREAD_DETACHED; 1104 } 1105 |
1106 if (!(sys_scope = (thread->attr.flags & PTHREAD_SCOPE_SYSTEM))) { |
1107 /* 1108 * Remove the thread from the KSEG's list of threads. 1109 */ 1110 KSEG_THRQ_REMOVE(thread->kseg, thread); 1111 /* 1112 * Migrate the thread to the main KSE so that this 1113 * KSE and KSEG can be cleaned when their last thread 1114 * exits. --- 6 unchanged lines hidden (view full) --- 1121 /* 1122 * We can't hold the thread list lock while holding the 1123 * scheduler lock. 1124 */ 1125 KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); 1126 DBG_MSG("Adding thread %p to GC list\n", thread); 1127 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); 1128 THR_GCLIST_ADD(thread); |
1129 /* Use thread_list_lock */ 1130 active_threads--; 1131 if (active_threads == 0) { 1132 KSE_LOCK_RELEASE(curkse, &_thread_list_lock); 1133 exit(0); 1134 } |
1135 KSE_LOCK_RELEASE(curkse, &_thread_list_lock); |
1136 if (sys_scope) { 1137 /* 1138 * System scope thread is single thread group, 1139 * when thread is exited, its kse and ksegrp should 1140 * be recycled as well. 1141 */ 1142 kse_exit(); 1143 PANIC("kse_exit() failed for system scope thread"); 1144 } |
1145 KSE_SCHED_LOCK(curkse, curkse->k_kseg); 1146} 1147 1148void 1149_thr_gc(struct pthread *curthread) 1150{ 1151 struct pthread *td, *td_next; 1152 kse_critical_t crit; 1153 TAILQ_HEAD(, pthread) worklist; 1154 1155 TAILQ_INIT(&worklist); 1156 crit = _kse_critical_enter(); 1157 KSE_LOCK_ACQUIRE(curthread->kse, &_thread_list_lock); 1158 1159 /* Check the threads waiting for GC. */ 1160 for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) { 1161 td_next = TAILQ_NEXT(td, gcle); 1162 if ((td->flags & THR_FLAGS_GC_SAFE) == 0) 1163 continue; |
1164 else if (((td->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) && 1165 ((td->kse->k_mbx.km_flags & KMF_DONE) == 0)) { |
1166 /* 1167 * The thread and KSE are operating on the same 1168 * stack. Wait for the KSE to exit before freeing 1169 * the thread's stack as well as everything else. 1170 */ 1171 continue; 1172 } |
1173 /* 1174 * Remove the thread from the GC list. If the thread 1175 * isn't yet detached, it will get added back to the 1176 * GC list at a later time. 1177 */ 1178 THR_GCLIST_REMOVE(td); 1179 DBG_MSG("Freeing thread %p stack\n", td); 1180 /* --- 13 unchanged lines hidden (view full) --- 1194 } 1195 } 1196 KSE_LOCK_RELEASE(curthread->kse, &_thread_list_lock); 1197 _kse_critical_leave(crit); 1198 1199 while ((td = TAILQ_FIRST(&worklist)) != NULL) { 1200 TAILQ_REMOVE(&worklist, td, gcle); 1201 |
1202 if ((td->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) { |
1203 crit = _kse_critical_enter(); 1204 KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); 1205 kse_free_unlocked(td->kse); 1206 kseg_free_unlocked(td->kseg); 1207 KSE_LOCK_RELEASE(curthread->kse, &kse_lock); 1208 _kse_critical_leave(crit); 1209 } 1210 DBG_MSG("Freeing thread %p\n", td); 1211 _thr_free(curthread, td); 1212 } |
1213 /* XXX free kse and ksegrp list should be looked as well */ |
1214} 1215 1216 1217/* 1218 * Only new threads that are running or suspended may be scheduled. 1219 */ 1220int 1221_thr_schedule_add(struct pthread *curthread, struct pthread *newthread) 1222{ 1223 struct kse *curkse; 1224 kse_critical_t crit; |
	int ret;

	/* Add the new thread to the global thread list (assigns uniqueid). */
	thr_link(newthread);

	/*
	 * If this is the first time creating a thread, make sure
	 * the mailbox is set for the current thread.
	 */
	if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
		/* System-scope thread: gets its own dedicated KSE/KSEG. */
#ifdef NOT_YET
		/* We use the thread's stack as the KSE's stack. */
		new_thread->kse->k_mbx.km_stack.ss_sp =
		    new_thread->attr.stackaddr_attr;
		new_thread->kse->k_mbx.km_stack.ss_size =
		    new_thread->attr.stacksize_attr;
#endif
		/*
		 * No need to lock the scheduling queue since the
		 * KSE/KSEG pair have not yet been started.
		 */
		KSEG_THRQ_ADD(newthread->kseg, newthread);
		if (newthread->state == PS_RUNNING)
			THR_RUNQ_INSERT_TAIL(newthread);
		newthread->kse->k_curthread = NULL;
		newthread->kse->k_mbx.km_flags = 0;
		newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
		newthread->kse->k_mbx.km_quantum = 0;

		/*
		 * This thread needs a new KSE and KSEG.  Temporarily switch
		 * the KSD to the new KSE around kse_create() and restore it
		 * afterwards, all inside a critical section.
		 */
		crit = _kse_critical_enter();
		curkse = _get_curkse();
		_ksd_setprivate(&newthread->kse->k_ksd);
		newthread->kse->k_flags |= KF_INITIALIZED|KF_STARTED;
		ret = kse_create(&newthread->kse->k_mbx, 1);
		if (ret != 0)
			ret = errno;	/* return the syscall error to caller */
		_ksd_setprivate(&curkse->k_ksd);
		_kse_critical_leave(crit);
	}
	else {
		/*
		 * Lock the KSE and add the new thread to its list of
		 * assigned threads.  If the new thread is runnable, also
		 * add it to the KSE's run queue.
		 */
		KSE_SCHED_LOCK(curthread->kse, newthread->kseg);
		KSEG_THRQ_ADD(newthread->kseg, newthread);
		if (newthread->state == PS_RUNNING)
			THR_RUNQ_INSERT_TAIL(newthread);
		if ((newthread->kse->k_flags & KF_STARTED) == 0) {
			/*
			 * This KSE hasn't been started yet.  Start it
			 * outside of holding the lock.
			 */
			newthread->kse->k_flags |= KF_STARTED;
			newthread->kse->k_mbx.km_func =
			    (kse_func_t *)kse_sched_multi;
			newthread->kse->k_mbx.km_flags = 0;
			/*
			 * NOTE(review): kse_create() failure is ignored
			 * here, unlike the system-scope branch above —
			 * confirm whether failure is impossible or should
			 * be propagated.
			 */
			kse_create(&newthread->kse->k_mbx, 0);
		} else if ((newthread->state == PS_RUNNING) &&
		    KSE_IS_IDLE(newthread->kse)) {
			/*
			 * The thread is being scheduled on another KSEG.
			 */
			kse_wakeup_one(newthread);
		}
		KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg);
		ret = 0;
	}
	/* On failure, undo the thr_link() done at the top. */
	if (ret != 0)
		thr_unlink(newthread);

	return (ret);
}
1508 if (!_LCK_GRANTED(&thread->lockusers[level])) |
1509 KSE_WAITQ_INSERT(kse, thread); 1510 else 1511 THR_SET_STATE(thread, PS_RUNNING); 1512 break; 1513 1514 case PS_JOIN: 1515 case PS_MUTEX_WAIT: 1516 case PS_SIGSUSPEND: --- 103 unchanged lines hidden (view full) --- 1620 1621/* 1622 * Avoid calling this kse_exit() so as not to confuse it with the 1623 * system call of the same name. 1624 */ 1625static void 1626kse_fini(struct kse *kse) 1627{ |
	/*
	 * NOTE(review): free_kseg is commented out, but the #ifdef NOT_YET
	 * block below still references it; that block will not compile if
	 * NOT_YET is ever defined.
	 */
	/* struct kse_group *free_kseg = NULL; */
	struct timespec ts;

	/*
	 * Check to see if this is one of the main kses.
	 */
	if (kse->k_kseg != _kse_initial->k_kseg) {
		PANIC("shouldn't get here");
		/* This is for supporting thread groups. */
#ifdef NOT_YET
		/* Remove this KSE from the KSEG's list of KSEs. */
		KSE_SCHED_LOCK(kse, kse->k_kseg);
		TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
		kse->k_kseg->kg_ksecount--;
		if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq))
			free_kseg = kse->k_kseg;
		KSE_SCHED_UNLOCK(kse, kse->k_kseg);

		/*
		 * Add this KSE to the list of free KSEs along with
		 * the KSEG if is now orphaned.
		 */
		KSE_LOCK_ACQUIRE(kse, &kse_lock);
		if (free_kseg != NULL)
			kseg_free_unlocked(free_kseg);
		kse_free_unlocked(kse);
		KSE_LOCK_RELEASE(kse, &kse_lock);
		kse_exit();
		/* Never returns. */
		PANIC("kse_exit()");
#endif
	} else {
#ifdef NOT_YET
		/*
		 * In future, we might allow program to kill
		 * kse in initial group.
		 */
		if (kse != _kse_initial) {
			KSE_SCHED_LOCK(kse, kse->k_kseg);
			TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
			kse->k_kseg->kg_ksecount--;
			KSE_SCHED_UNLOCK(kse, kse->k_kseg);
			KSE_LOCK_ACQUIRE(kse, &kse_lock);
			kse_free_unlocked(kse);
			KSE_LOCK_RELEASE(kse, &kse_lock);
			kse_exit();
			/* Never returns. */
			PANIC("kse_exit() failed for initial kseg");
		}
#endif
		/*
		 * Mark this KSE idle and release it back to the kernel,
		 * waiting (up to 120 seconds) for more work to arrive.
		 */
		KSE_SCHED_LOCK(kse, kse->k_kseg);
		KSE_SET_IDLE(kse);
		kse->k_kseg->kg_idle_kses++;
		KSE_SCHED_UNLOCK(kse, kse->k_kseg);
		ts.tv_sec = 120;
		ts.tv_nsec = 0;
		kse->k_mbx.km_flags = 0;
		kse_release(&ts);
		/* Never reach */
	}
}
1946 while ((kse != NULL) && |
1947 ((kse->k_mbx.km_flags & KMF_DONE) == 0)) { |
1948 kse = TAILQ_NEXT(kse, k_qe); 1949 } |
1950 if (kse != NULL) { |
1951 DBG_MSG("found an unused kse.\n"); |
1952 TAILQ_REMOVE(&free_kseq, kse, k_qe); 1953 free_kse_count--; 1954 TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe); 1955 active_kse_count++; 1956 } 1957 KSE_LOCK_RELEASE(curthread->kse, &kse_lock); 1958 _kse_critical_leave(crit); 1959 if (kse != NULL) --- 71 unchanged lines hidden (view full) --- 2031 } 2032 } 2033 return (kse); 2034} 2035 2036static void 2037kse_reinit(struct kse *kse) 2038{ |
2039 /* 2040 * XXX - For now every kse has its stack. 2041 * In the future, we may want to have it done 2042 * outside the allocation so that scope system 2043 * threads (one thread per KSE) are not required 2044 * to have a stack for an unneeded kse upcall. 2045 */ 2046 kse->k_mbx.km_flags = 0; |
2047 kse->k_curthread = 0; 2048 kse->k_kseg = 0; 2049 kse->k_schedq = 0; 2050 kse->k_locklevel = 0; 2051 sigemptyset(&kse->k_sigmask); 2052 bzero(&kse->k_sigq, sizeof(kse->k_sigq)); 2053 kse->k_check_sigq = 0; 2054 kse->k_flags = 0; 2055 kse->k_waiting = 0; |
2056 kse->k_idle = 0; |
2057 kse->k_error = 0; 2058 kse->k_cpu = 0; 2059 kse->k_done = 0; 2060} 2061 2062void 2063kse_free_unlocked(struct kse *kse) 2064{ 2065 TAILQ_REMOVE(&active_kseq, kse, k_qe); 2066 active_kse_count--; 2067 kse->k_kseg = NULL; 2068 kse->k_mbx.km_quantum = 20000; |
2069 kse->k_flags = 0; |
2070 TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe); 2071 free_kse_count++; 2072} 2073 2074void 2075_kse_free(struct pthread *curthread, struct kse *kse) 2076{ 2077 kse_critical_t crit; --- 78 unchanged lines hidden (view full) --- 2156 crit = _kse_critical_enter(); 2157 KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock); 2158 TAILQ_INSERT_HEAD(&free_threadq, thread, tle); 2159 free_thread_count++; 2160 KSE_LOCK_RELEASE(curthread->kse, &thread_lock); 2161 _kse_critical_leave(crit); 2162 } 2163} |
2164 2165/* 2166 * Add an active thread: 2167 * 2168 * o Assign the thread a unique id (which GDB uses to track 2169 * threads. 2170 * o Add the thread to the list of all threads and increment 2171 * number of active threads. 2172 */ 2173static void 2174thr_link(struct pthread *thread) 2175{ 2176 kse_critical_t crit; 2177 struct kse *curkse; 2178 2179 crit = _kse_critical_enter(); 2180 curkse = _get_curkse(); 2181 2182 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); 2183 /* 2184 * Initialize the unique id (which GDB uses to track 2185 * threads), add the thread to the list of all threads, 2186 * and 2187 */ 2188 thread->uniqueid = next_uniqueid++; 2189 THR_LIST_ADD(thread); 2190 active_threads++; 2191 KSE_LOCK_RELEASE(curkse, &_thread_list_lock); 2192 2193 _kse_critical_leave(crit); 2194} 2195 2196/* 2197 * Remove an active thread. 2198 */ 2199static void 2200thr_unlink(struct pthread *thread) 2201{ 2202 kse_critical_t crit; 2203 struct kse *curkse; 2204 2205 crit = _kse_critical_enter(); 2206 curkse = _get_curkse(); 2207 2208 KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock); 2209 THR_LIST_REMOVE(thread); 2210 active_threads--; 2211 KSE_LOCK_RELEASE(curkse, &_thread_list_lock); 2212 2213 _kse_critical_leave(crit); 2214} |