36c36
< __FBSDID("$FreeBSD: head/lib/libkse/thread/thr_kern.c 115173 2003-05-19 23:04:50Z deischen $");
---
> __FBSDID("$FreeBSD: head/lib/libkse/thread/thr_kern.c 115278 2003-05-24 02:29:25Z deischen $");
99a100,103
> #ifndef KMF_DONE
> #define KMF_DONE 0x04
> #endif
>
116a121
> static int active_threads = 1;
118a124
> static u_int64_t next_uniqueid = 1;
119a126
>
141a149
> static void thr_link(struct pthread *thread);
146a155
> static void thr_unlink(struct pthread *thread);
156,158c165,167
< struct kse *kse, *kse_next;
< struct kse_group *kseg, *kseg_next;
< struct pthread *thread, *thread_next;
---
> struct kse *kse;
> struct kse_group *kseg;
> struct pthread *thread;
168a178
> active_threads = 1;
174,175c184,185
< for (thread = TAILQ_FIRST(&_thread_list); thread != NULL;
< thread = thread_next) {
---
> while ((thread = TAILQ_FIRST(&_thread_list)) != NULL) {
> THR_GCLIST_REMOVE(thread);
177,182d186
< * Advance to the next thread before the destroying
< * the current thread.
< */
< thread_next = TAILQ_NEXT(thread, tle);
<
< /*
202a207
> curthread->refcount = 0;
212a218,221
> for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
> _lockuser_destroy(&kse->k_lockusers[i]);
> }
> _lock_destroy(&kse->k_lock);
221,222c230
< for (kse = TAILQ_FIRST(&active_kseq); kse != NULL; kse = kse_next) {
< kse_next = TAILQ_NEXT(kse, k_qe);
---
> while ((kse = TAILQ_FIRST(&active_kseq)) != NULL) {
226a235
> _lock_destroy(&kse->k_lock);
229d237
< _lock_destroy(&kse->k_lock);
244,246c252
< for (kseg = TAILQ_FIRST(&active_kse_groupq);
< kseg != NULL; kseg = kseg_next) {
< kseg_next = TAILQ_NEXT(kseg, kg_qe);
---
> while ((kseg = TAILQ_FIRST(&active_kse_groupq)) != NULL) {
404c410
< while (_LCK_BUSY(lu)) {
---
> while (!_LCK_GRANTED(lu)) {
456c462
< } while _LCK_BUSY(lu);
---
> } while (!_LCK_GRANTED(lu));
695c701,702
< /* Unlock the scheduling queue and exit the KSE. */
---
> /* Unlock the scheduling queue and exit the KSE and thread. */
> thr_cleanup(curkse, curthread);
697d703
< kse_fini(curkse); /* does not return */
711c717
< if (_LCK_BUSY(&curthread->lockusers[level]))
---
> if (!_LCK_GRANTED(&curthread->lockusers[level]))
817a824,827
> if (KSE_IS_IDLE(curkse)) {
> KSE_CLEAR_IDLE(curkse);
> curkse->k_kseg->kg_idle_kses--;
> }
914,915c924
< KSE_SCHED_LOCK(curkse, curkse->k_kseg);
< curthread = KSE_RUNQ_FIRST(curkse);
---
> /* never returns */
1068a1078
> int sys_scope;
1071,1080c1081,1085
< thread->joiner = NULL;
< if ((joiner->state == PS_JOIN) &&
< (joiner->join_status.thread == thread)) {
< joiner->join_status.thread = NULL;
<
< /* Set the return status for the joining thread: */
< joiner->join_status.ret = thread->ret;
<
< /* Make the thread runnable. */
< if (joiner->kseg == curkse->k_kseg)
---
> /* Joinee scheduler lock held; joiner won't leave. */
> if (joiner->kseg == curkse->k_kseg) {
> if (joiner->join_status.thread == thread) {
> joiner->join_status.thread = NULL;
> joiner->join_status.ret = thread->ret;
1082,1083c1087,1091
< else {
< KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
---
> }
> } else {
> KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
> /* The joiner may have removed itself and exited. */
> if (_thr_ref_add(thread, joiner, 0) == 0) {
1085c1093,1097
< _thr_setrunnable_unlocked(joiner);
---
> if (joiner->join_status.thread == thread) {
> joiner->join_status.thread = NULL;
> joiner->join_status.ret = thread->ret;
> _thr_setrunnable_unlocked(joiner);
> }
1087c1099
< KSE_SCHED_LOCK(curkse, curkse->k_kseg);
---
> _thr_ref_delete(thread, joiner);
1088a1101
> KSE_SCHED_LOCK(curkse, curkse->k_kseg);
1093c1106
< if ((thread->attr.flags & PTHREAD_SCOPE_PROCESS) == 0) {
---
> if (!(sys_scope = (thread->attr.flags & PTHREAD_SCOPE_SYSTEM))) {
1115a1129,1134
> /* Use thread_list_lock */
> active_threads--;
> if (active_threads == 0) {
> KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
> exit(0);
> }
1116a1136,1144
> if (sys_scope) {
> /*
> * A system scope thread is the only thread in its group;
> * when the thread exits, its kse and ksegrp should
> * be recycled as well.
> */
> kse_exit();
> PANIC("kse_exit() failed for system scope thread");
> }
1136,1138c1164,1165
< #ifdef NOT_YET
< else if (((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) &&
< (td->kse->k_mbx.km_flags == 0)) {
---
> else if (((td->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
> ((td->kse->k_mbx.km_flags & KMF_DONE) == 0)) {
1146d1172
< #endif
1176c1202
< if ((td->attr.flags & PTHREAD_SCOPE_PROCESS) != 0) {
---
> if ((td->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
1186a1213
> /* XXX The free kse and ksegrp lists should be looked at as well. */
1198d1224
< int need_start;
1200a1227,1229
> /* Add the new thread. */
> thr_link(newthread);
>
1218,1220d1246
< TAILQ_INSERT_TAIL(&newthread->kseg->kg_kseq, newthread->kse,
< k_kgqe);
< newthread->kseg->kg_ksecount = 1;
1234c1260
< newthread->kse->k_flags |= KF_INITIALIZED;
---
> newthread->kse->k_flags |= KF_INITIALIZED|KF_STARTED;
1247d1272
< need_start = 0;
1261,1265d1285
< need_start = 1;
< }
< KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg);
<
< if (need_start != 0)
1267,1268c1287,1288
< else if ((newthread->state == PS_RUNNING) &&
< KSE_IS_IDLE(newthread->kse)) {
---
> } else if ((newthread->state == PS_RUNNING) &&
> KSE_IS_IDLE(newthread->kse)) {
1273a1294
> KSE_SCHED_UNLOCK(curthread->kse, newthread->kseg);
1275a1297,1299
> if (ret != 0)
> thr_unlink(newthread);
>
1484c1508
< if (_LCK_BUSY(&thread->lockusers[level]))
---
> if (!_LCK_GRANTED(&thread->lockusers[level]))
1603a1628
> /* struct kse_group *free_kseg = NULL; */
1605d1629
< struct kse_group *free_kseg = NULL;
1607,1608d1630
< if ((kse->k_kseg->kg_flags & KGF_SINGLE_THREAD) != 0)
< kse_exit();
1612c1634,1637
< else if (kse->k_kseg != _kse_initial->k_kseg) {
---
> if (kse->k_kseg != _kse_initial->k_kseg) {
> PANIC("shouldn't get here");
> /* This is for supporting thread groups. */
> #ifdef NOT_YET
1625d1649
< #ifdef NOT_YET
1631d1654
< #endif
1633a1657,1658
> PANIC("kse_exit()");
> #endif
1634a1660
> #ifdef NOT_YET
1636,1639c1662,1663
< * Wait for the last KSE/thread to exit, or for more
< * threads to be created (it is possible for additional
< * scope process threads to be created after the main
< * thread exits).
---
> * In the future, we might allow a program to kill
> * kses in the initial group.
1640a1665,1681
> if (kse != _kse_initial) {
> KSE_SCHED_LOCK(kse, kse->k_kseg);
> TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
> kse->k_kseg->kg_ksecount--;
> KSE_SCHED_UNLOCK(kse, kse->k_kseg);
> KSE_LOCK_ACQUIRE(kse, &kse_lock);
> kse_free_unlocked(kse);
> KSE_LOCK_RELEASE(kse, &kse_lock);
> kse_exit();
> /* Never returns. */
> PANIC("kse_exit() failed for initial kseg");
> }
> #endif
> KSE_SCHED_LOCK(kse, kse->k_kseg);
> KSE_SET_IDLE(kse);
> kse->k_kseg->kg_idle_kses++;
> KSE_SCHED_UNLOCK(kse, kse->k_kseg);
1643,1659c1684,1686
< KSE_SET_WAIT(kse);
< KSE_SCHED_LOCK(kse, kse->k_kseg);
< if ((active_kse_count > 1) &&
< (kse->k_kseg->kg_threadcount == 0)) {
< KSE_SCHED_UNLOCK(kse, kse->k_kseg);
< kse_release(&ts);
< /* The above never returns. */
< }
< else
< KSE_SCHED_UNLOCK(kse, kse->k_kseg);
<
< /* There are no more threads; exit this process: */
< if (kse->k_kseg->kg_threadcount == 0) {
< /* kse_exit(); */
< __isthreaded = 0;
< exit(0);
< }
---
> kse->k_mbx.km_flags = 0;
> kse_release(&ts);
> /* Never reached. */
1919,1920d1945
< #ifdef NOT_YET
< #define KEMBX_DONE 0x04
1922c1947
< ((kse->k_mbx.km_flags & KEMBX_DONE) == 0)) {
---
> ((kse->k_mbx.km_flags & KMF_DONE) == 0)) {
1925,1926d1949
< #undef KEMBX_DONE
< #endif
1927a1951
> DBG_MSG("found an unused kse.\n");
2015c2039,2046
< bzero(&kse->k_mbx, sizeof(struct kse_mailbox));
---
> /*
> * XXX - For now every kse has its own stack.
> * In the future, we may want to set up the stack
> * outside of the allocation so that system scope
> * threads (one thread per KSE) are not required
> * to have a stack for an unneeded kse upcall.
> */
> kse->k_mbx.km_flags = 0;
2024a2056
> kse->k_idle = 0;
2037c2069
< kse->k_flags &= ~KF_INITIALIZED;
---
> kse->k_flags = 0;
2131a2164,2214
>
> /*
> * Add an active thread:
> *
> * o Assign the thread a unique id (which GDB uses to track
> * threads).
> * o Add the thread to the list of all threads and increment
> * the number of active threads.
> */
> static void
> thr_link(struct pthread *thread)
> {
> kse_critical_t crit;
> struct kse *curkse;
>
> crit = _kse_critical_enter();
> curkse = _get_curkse();
>
> KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
> /*
> * Initialize the unique id (which GDB uses to track
> * threads), add the thread to the list of all threads,
> * and increment the number of active threads.
> */
> thread->uniqueid = next_uniqueid++;
> THR_LIST_ADD(thread);
> active_threads++;
> KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
>
> _kse_critical_leave(crit);
> }
>
> /*
> * Remove an active thread.
> */
> static void
> thr_unlink(struct pthread *thread)
> {
> kse_critical_t crit;
> struct kse *curkse;
>
> crit = _kse_critical_enter();
> curkse = _get_curkse();
>
> KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
> THR_LIST_REMOVE(thread);
> active_threads--;
> KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
>
> _kse_critical_leave(crit);
> }