kern_thread.c revision 104031
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_thread.c 104031 2002-09-27 07:11:11Z julian $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>
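/*
 * This file implements the machine-independent side of kernel threads
 * in the KSE (Kernel Scheduled Entities) model: allocation and reuse
 * of struct thread, struct kse and struct ksegrp, exporting completed
 * thread contexts to the userland thread scheduler (the UTS), upcall
 * scheduling, and the single-threading machinery used for events such
 * as fork and exit.
 */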
/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int oiks_debug = 1;      /* 0 disable, 1 printf, 2 enter debugger */
SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
    &oiks_debug, 0, "OIKS thread debug");

static int max_threads_per_proc = 6;
SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
struct mtx zombie_thread_lock;
MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
    "zombie_thread_lock", MTX_SPIN);

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
        struct thread *td;

        KASSERT((size == sizeof(struct thread)),
            ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
        td->td_flags |= TDF_UNBOUND;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        KASSERT((size == sizeof(struct thread)),
            ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
}
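/*
 * Note that the UMA init/fini routines below run only when memory
 * moves between the zone and the VM system, while the ctor/dtor pair
 * above runs on every thread_alloc()/thread_free().  The expensive
 * pmap and machine-dependent setup is thus amortized over many reuses
 * of the same storage.
 */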
/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
        struct thread *td;

        KASSERT((size == sizeof(struct thread)),
            ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

        td = (struct thread *)mem;
        mtx_lock(&Giant);
        pmap_new_thread(td);
        mtx_unlock(&Giant);
        cpu_thread_setup(td);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        KASSERT((size == sizeof(struct thread)),
            ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

        td = (struct thread *)mem;
        pmap_dispose_thread(td);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
        get_mcontext(td, &uc->uc_mcontext);
#endif
        uc->uc_sigmask = td->td_proc->p_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
        int ret;

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
        ret = set_mcontext(td, &uc->uc_mcontext);
#else
        ret = ENOSYS;
#endif
        if (ret == 0) {
                SIG_CANTMASK(uc->uc_sigmask);
                PROC_LOCK(td->td_proc);
                td->td_proc->p_sigmask = uc->uc_sigmask;
                PROC_UNLOCK(td->td_proc);
        }
        return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

        thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, 0);
        ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp),
            NULL, NULL, NULL, NULL,
            UMA_ALIGN_CACHE, 0);
        kse_zone = uma_zcreate("KSE", sizeof (struct kse),
            NULL, NULL, NULL, NULL,
            UMA_ALIGN_CACHE, 0);
}
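/*
 * A thread cannot be freed from within its own context (its stack is
 * still in use), so surplus threads are parked on the zombie queue
 * below and reaped later, for instance from thread_alloc().
 */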
/*
 * Stash an embarrassing extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
        mtx_lock_spin(&zombie_thread_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
        mtx_unlock_spin(&zombie_thread_lock);
}

/*
 * Reap zombie threads.
 */
void
thread_reap(void)
{
        struct thread *td_reaped;

        /*
         * Don't even bother to lock if there are none at this instant;
         * we really don't care about the next instant.
         */
        if (!TAILQ_EMPTY(&zombie_threads)) {
                mtx_lock_spin(&zombie_thread_lock);
                while (!TAILQ_EMPTY(&zombie_threads)) {
                        td_reaped = TAILQ_FIRST(&zombie_threads);
                        TAILQ_REMOVE(&zombie_threads, td_reaped, td_runq);
                        mtx_unlock_spin(&zombie_thread_lock);
                        thread_free(td_reaped);
                        mtx_lock_spin(&zombie_thread_lock);
                }
                mtx_unlock_spin(&zombie_thread_lock);
        }
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
        return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
        return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
        thread_reap();  /* check if any zombies to get */
        return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
        uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
        uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
        uma_zfree(thread_zone, td);
}
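/*
 * The completed-thread list manipulated below lives entirely in user
 * space; the kernel only ever writes single words into it with
 * suword().  From the UTS's point of view it is a singly-linked list
 * of struct kse_thr_mailbox.  A minimal sketch of the consumer side
 * (hypothetical UTS code, not part of the kernel):
 *
 *      struct kse_thr_mailbox *tm;
 *
 *      while ((tm = km->km_completed) != NULL) {
 *              km->km_completed = tm->tm_next;
 *              handle_completed_thread(tm);    // UTS policy, hypothetical
 *      }
 */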
/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
        struct ksegrp *kg;
        uintptr_t mbx;
        void *addr;
        int error;
        ucontext_t uc;

        /* Export the user/machine context. */
#if 0
        addr = (caddr_t)td->td_mailbox +
            offsetof(struct kse_thr_mailbox, tm_context);
#else /* if user pointer arithmetic is valid in the kernel */
        addr = (void *)(&td->td_mailbox->tm_context);
#endif
        error = copyin(addr, &uc, sizeof(ucontext_t));
        if (error == 0) {
                thread_getcontext(td, &uc);
                error = copyout(&uc, addr, sizeof(ucontext_t));
        }
        if (error) {
                psignal(td->td_proc, SIGSEGV);
                return (error);
        }
        /* Get the address of the list pointer in the latest mailbox. */
#if 0
        addr = (caddr_t)td->td_mailbox
            + offsetof(struct kse_thr_mailbox, tm_next);
#else /* if user pointer arithmetic is valid in the kernel */
        addr = (void *)(&td->td_mailbox->tm_next);
#endif
        /*
         * Put the saved address of the previous first
         * entry into this one.
         */
        kg = td->td_ksegrp;
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        psignal(kg->kg_proc, SIGSEGV);
                        return (EFAULT);
                }
                PROC_LOCK(kg->kg_proc);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = td->td_mailbox;
                        PROC_UNLOCK(kg->kg_proc);
                        break;
                }
                PROC_UNLOCK(kg->kg_proc);
        }
        return (0);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * KSE's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
{
        void *addr;
        uintptr_t mbx;

#if 0
        addr = (caddr_t)ke->ke_mailbox
            + offsetof(struct kse_mailbox, km_completed);
#else /* if user pointer arithmetic is valid in the kernel */
        addr = (void *)(&ke->ke_mailbox->km_completed);
#endif
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        psignal(kg->kg_proc, SIGSEGV);
                        return (EFAULT);
                }
                /* XXXKSE could use atomic CMPXCH here */
                PROC_LOCK(kg->kg_proc);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = NULL;
                        PROC_UNLOCK(kg->kg_proc);
                        break;
                }
                PROC_UNLOCK(kg->kg_proc);
        }
        return (0);
}
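/*
 * Both functions above use the same optimistic protocol to splice
 * into a user-visible list: write the current head into the new
 * element's link word with suword(), then recheck the head under the
 * proc lock and retry if it moved in the meantime.  As the XXXKSE
 * note says, a compare-and-exchange on the user word could replace
 * the retry loop some day.
 */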
/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our KSE's ke_tdspare slot, freeing the
 * thread that might be there currently. Because we know that only this
 * processor will run our KSE, we needn't worry about someone else grabbing
 * our context before we do a cpu_throw.
 */
void
thread_exit(void)
{
        struct thread *td;
        struct kse *ke;
        struct proc *p;
        struct ksegrp *kg;

        td = curthread;
        kg = td->td_ksegrp;
        p = td->td_proc;
        ke = td->td_kse;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        KASSERT(ke != NULL, ("thread exiting without a kse"));
        KASSERT(kg != NULL, ("thread exiting without a kse group"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        CTR1(KTR_PROC, "thread_exit: thread %p", td);
        KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

        if (ke->ke_tdspare != NULL) {
                thread_stash(ke->ke_tdspare);
                ke->ke_tdspare = NULL;
        }
        cpu_thread_exit(td);    /* XXXSMP */

        /*
         * The last thread is left attached to the process
         * so that the whole bundle gets recycled. Skip
         * all this stuff.
         */
        if (p->p_numthreads > 1) {
                /* Reassign this thread's KSE. */
                ke->ke_thread = NULL;
                td->td_kse = NULL;
                ke->ke_state = KES_UNQUEUED;
                kse_reassign(ke);

                /* Unlink this thread from its proc and ksegrp. */
                TAILQ_REMOVE(&p->p_threads, td, td_plist);
                p->p_numthreads--;
                TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
                kg->kg_numthreads--;
                /*
                 * The test below is NOT true if we are the
                 * sole exiting thread. P_STOPPED_SINGLE is unset
                 * in exit1() after it is the only survivor.
                 */
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount) {
                                thread_unsuspend_one(p->p_singlethread);
                        }
                }
                PROC_UNLOCK(p);
                td->td_state = TDS_INACTIVE;
                td->td_proc = NULL;
                td->td_ksegrp = NULL;
                td->td_last_kse = NULL;
                ke->ke_tdspare = td;
        } else {
                PROC_UNLOCK(p);
        }

        cpu_throw();
        /* NOTREACHED */
}
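/*
 * Note that thread_link() below deliberately leaves td_ucred unset;
 * callers such as thread_schedule_upcall() are responsible for taking
 * their own reference with crhold().
 */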
/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
        struct proc *p;

        p = kg->kg_proc;
        td->td_state = TDS_INACTIVE;
        td->td_proc = p;
        td->td_ksegrp = kg;
        td->td_last_kse = NULL;

        LIST_INIT(&td->td_contested);
        callout_init(&td->td_slpcallout, 1);
        TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
        TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
        p->p_numthreads++;
        kg->kg_numthreads++;
        if (oiks_debug && p->p_numthreads > max_threads_per_proc) {
                printf("OIKS %d\n", p->p_numthreads);
                if (oiks_debug > 1)
                        Debugger("OIKS");
        }
        td->td_kse = NULL;
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
        struct thread *td2;

        mtx_assert(&sched_lock, MA_OWNED);
        if (ke->ke_tdspare != NULL) {
                td2 = ke->ke_tdspare;
                ke->ke_tdspare = NULL;
        } else {
                mtx_unlock_spin(&sched_lock);
                td2 = thread_alloc();
                mtx_lock_spin(&sched_lock);
        }
        CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
            td, td->td_proc->p_pid, td->td_proc->p_comm);
        bzero(&td2->td_startzero,
            (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
        bcopy(&td->td_startcopy, &td2->td_startcopy,
            (unsigned)RANGEOF(struct thread, td_startcopy, td_endcopy));
        thread_link(td2, ke->ke_ksegrp);
        cpu_set_upcall(td2, td->td_pcb);
        bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
        /*
         * The user context for this thread is selected when we choose
         * a KSE and return to userland on it. All we need do here is
         * note that the thread exists in order to perform an upcall.
         *
         * Since selecting a KSE to perform the upcall involves locking
         * that KSE's context to our upcall, it's best to wait until the
         * last possible moment before grabbing a KSE. We do this in
         * userret().
         */
        td2->td_ucred = crhold(td->td_ucred);
        td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
        TD_SET_CAN_RUN(td2);
        setrunqueue(td2);
        return (td2);
}
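/*
 * Beware that thread_schedule_upcall() above may drop and reacquire
 * sched_lock around thread_alloc() when no spare thread is cached, so
 * callers must not assume that state protected by sched_lock is
 * unchanged across the call.
 */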
/*
 * Schedule an upcall to notify a KSE process that signals were received.
 *
 * XXX - Modifying a sigset_t like this is totally bogus.
 */
struct thread *
signal_upcall(struct proc *p, int sig)
{
        struct thread *td, *td2;
        struct kse *ke;
        sigset_t ss;
        int error;

        PROC_LOCK_ASSERT(p, MA_OWNED);

        td = FIRST_THREAD_IN_PROC(p);
        ke = td->td_kse;
        PROC_UNLOCK(p);
        error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
        PROC_LOCK(p);
        if (error)
                return (NULL);
        SIGADDSET(ss, sig);
        PROC_UNLOCK(p);
        error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
        PROC_LOCK(p);
        if (error)
                return (NULL);
        mtx_lock_spin(&sched_lock);
        td2 = thread_schedule_upcall(td, ke);
        mtx_unlock_spin(&sched_lock);
        return (td2);
}
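/*
 * The "totally bogus" XXX above refers to the fact that the proc lock
 * is dropped around each of the copyin() and copyout() calls, so a
 * concurrent update of km_sigscaught (by the UTS or by another signal
 * being delivered) can be silently lost.
 */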
/*
 * Consider whether or not an upcall should be made, and update the
 * TDF_UPCALLING flag appropriately.
 *
 * This function is called when the current thread had been bound to a user
 * thread that performed a syscall that blocked, and is now returning.
 * Got that? syscall -> msleep -> wakeup -> syscall_return -> us.
 *
 * This thread will be returned to the UTS in its mailbox as a completed
 * thread.  We need to decide whether or not to perform an upcall now,
 * or simply queue the thread for later.
 *
 * XXXKSE Future enhancement: We could also return back to
 * the thread if we haven't had to do an upcall since then,
 * i.e. if the KSE's copy is == the thread's copy, and there are
 * no other completed threads.
 */
static int
thread_consider_upcalling(struct thread *td)
{
        struct proc *p;
        struct ksegrp *kg;
        int error;

        /*
         * Save the thread's context, and link it
         * into the KSEGRP's list of completed threads.
         */
        error = thread_export_context(td);
        td->td_flags &= ~TDF_UNBOUND;
        td->td_mailbox = NULL;
        if (error)
                /*
                 * Failing to do the KSE operation just defaults
                 * back to synchronous operation, so just return from
                 * the syscall.
                 */
                return (error);

        /*
         * Decide whether to perform an upcall now.
         */
        /* Make sure there are no other threads waiting to run. */
        p = td->td_proc;
        kg = td->td_ksegrp;
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        /* bogus test, ok for testing though */
        if (TAILQ_FIRST(&kg->kg_runq) &&
            (TAILQ_LAST(&kg->kg_runq, threadqueue)
                != kg->kg_last_assigned)) {
                /*
                 * Another thread in this KSEG needs to run.
                 * Switch to it instead of performing an upcall,
                 * abandoning this thread.  Perform the upcall
                 * later; discard this thread for now.
                 *
                 * XXXKSE - As for the other threads to run;
                 * we COULD rush through all the threads
                 * in this KSEG at this priority, or we
                 * could throw the ball back into the court
                 * and just run the highest prio kse available.
                 * What is OUR priority? The priority of the highest
                 * syscall waiting to be returned?
                 * For now, just let another KSE run (easiest).
                 */
                thread_exit(); /* Abandon current thread. */
                /* NOTREACHED */
        }
        /*
         * Perform an upcall now.
         *
         * XXXKSE - Assumes we are going to userland, and not
         * nested in the kernel.
         */
        td->td_flags |= TDF_UPCALLING;
        mtx_unlock_spin(&sched_lock);
        PROC_UNLOCK(p);
        return (0);
}
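/*
 * On the TDF_UPCALLING path below, thread_userret() points the machine
 * context at the UTS entry, hands the group's completed mailboxes to
 * the KSE's mailbox, and zeroes km_curthread so that the UTS can see
 * that no user thread is currently bound to the KSE.
 */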
/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
        int error;
        int unbound;
        struct kse *ke;

        /* Make the thread bound from now on, but remember what it was. */
        unbound = td->td_flags & TDF_UNBOUND;
        td->td_flags &= ~TDF_UNBOUND;
        /*
         * Ensure that we have a spare thread available.
         */
        ke = td->td_kse;
        if (ke->ke_tdspare == NULL) {
                mtx_lock(&Giant);
                ke->ke_tdspare = thread_alloc();
                mtx_unlock(&Giant);
        }
        /*
         * Originally bound threads need no additional work.
         */
        if (unbound == 0)
                return (0);
        error = 0;
        /*
         * Decide whether or not we should perform an upcall now.
         */
        if (((td->td_flags & TDF_UPCALLING) == 0) && unbound) {
                /* If we have other threads to run we will not return. */
                if ((error = thread_consider_upcalling(td)))
                        return (error); /* couldn't go async, just go sync. */
        }
        if (td->td_flags & TDF_UPCALLING) {
                /*
                 * There is no more work to do and we are going to ride
                 * this thread/KSE up to userland as an upcall.
                 */
                CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
                    td, td->td_proc->p_pid, td->td_proc->p_comm);

                /*
                 * Set user context to the UTS.
                 */
                cpu_set_upcall_kse(td, ke);

                /*
                 * Put any completed mailboxes on this KSE's list.
                 */
                error = thread_link_mboxes(td->td_ksegrp, ke);
                if (error)
                        goto bad;

                /*
                 * Set state and mailbox.
                 */
                td->td_flags &= ~TDF_UPCALLING;
#if 0
                error = suword((caddr_t)ke->ke_mailbox +
                    offsetof(struct kse_mailbox, km_curthread),
                    0);
#else   /* if user pointer arithmetic is ok in the kernel */
                error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
                if (error)
                        goto bad;
        }
        /*
         * Stop any chance that we may be separated from
         * the KSE we are currently on. This is "biting the bullet":
         * we are committing to go to user space on this KSE here.
         */
        return (error);
bad:
        /*
         * Things are going to be so screwed we should just kill
         * the process.  How do we do that?
         */
        panic("thread_userret.. need to kill proc..... how?");
}
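/*
 * The functions below implement single-threading for a process:
 * thread_single() forces all other threads to suspend at the user
 * boundary (or exit), thread_suspend_check() is the hook those
 * threads run through on their way out of the kernel, and
 * thread_unsuspend()/thread_single_end() let them loose again.
 */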
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may however
 * be accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
        struct thread *td;
        struct thread *td2;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        KASSERT((td != NULL), ("curthread is NULL"));

        if ((p->p_flag & P_KSES) == 0)
                return (0);

        /* Is someone already single threading? */
        if (p->p_singlethread)
                return (1);

        if (force_exit == SINGLE_EXIT)
                p->p_flag |= P_SINGLE_EXIT;
        else
                p->p_flag &= ~P_SINGLE_EXIT;
        p->p_flag |= P_STOPPED_SINGLE;
        p->p_singlethread = td;
        while ((p->p_numthreads - p->p_suspcount) != 1) {
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td2) {
                        if (td2 == td)
                                continue;
                        if (TD_IS_INHIBITED(td2)) {
                                if (TD_IS_SUSPENDED(td2)) {
                                        if (force_exit == SINGLE_EXIT) {
                                                thread_unsuspend_one(td2);
                                        }
                                }
                                if (TD_IS_SLEEPING(td2)) {
                                        if (td2->td_flags & TDF_CVWAITQ)
                                                cv_waitq_remove(td2);
                                        else
                                                unsleep(td2);
                                        break;
                                }
                                if (TD_CAN_RUN(td2))
                                        setrunqueue(td2);
                        }
                }
                /*
                 * Wake us up when everyone else has suspended.
                 * In the mean time we suspend as well.
                 */
                thread_suspend_one(td);
                mtx_unlock(&Giant);
                PROC_UNLOCK(p);
                mi_switch();
                mtx_unlock_spin(&sched_lock);
                mtx_lock(&Giant);
                PROC_LOCK(p);
        }
        return (0);
}
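/*
 * A minimal sketch of a call site for thread_single() (the real
 * callers live elsewhere in the kernel; this is illustrative only):
 *
 *      PROC_LOCK(p);
 *      if (thread_single(SINGLE_EXIT)) {
 *              ... someone else is already single-threading; abort ...
 *      }
 *      ... all other threads are now suspended or heading for exit ...
 */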
/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        while (P_SHOULDSTOP(p)) {
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        KASSERT(p->p_singlethread != NULL,
                            ("singlethread not set"));
                        /*
                         * The only suspension in action is a
                         * single-threading. Single threader need not stop.
                         * XXX Should be safe to access unlocked
                         * as it can only be set to be true by us.
                         */
                        if (p->p_singlethread == td)
                                return (0);     /* Exempt from stopping. */
                }
                if (return_instead)
                        return (1);

                /*
                 * If the process is waiting for us to exit,
                 * this thread should just suicide.
                 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
                 */
                if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
                        mtx_lock_spin(&sched_lock);
                        while (mtx_owned(&Giant))
                                mtx_unlock(&Giant);
                        thread_exit();
                }

                /*
                 * When a thread suspends, it just
                 * moves to the process's suspend queue
                 * and stays there.
                 *
                 * XXXKSE if TDF_BOUND is true
                 * it will not release its KSE which might
                 * lead to deadlock if there are not enough KSEs
                 * to complete all waiting threads.
                 * Maybe be able to 'lend' it out again.
                 * (lent kse's can not go back to userland?)
                 * and can only be lent in STOPPED state.
                 */
                mtx_lock_spin(&sched_lock);
                if ((p->p_flag & P_STOPPED_SIG) &&
                    (p->p_suspcount + 1 == p->p_numthreads)) {
                        mtx_unlock_spin(&sched_lock);
                        PROC_LOCK(p->p_pptr);
                        if ((p->p_pptr->p_procsig->ps_flag &
                            PS_NOCLDSTOP) == 0) {
                                psignal(p->p_pptr, SIGCHLD);
                        }
                        PROC_UNLOCK(p->p_pptr);
                        mtx_lock_spin(&sched_lock);
                }
                mtx_assert(&Giant, MA_NOTOWNED);
                thread_suspend_one(td);
                PROC_UNLOCK(p);
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount) {
                                thread_unsuspend_one(p->p_singlethread);
                        }
                }
                p->p_stats->p_ru.ru_nivcsw++;
                mi_switch();
                mtx_unlock_spin(&sched_lock);
                PROC_LOCK(p);
        }
        return (0);
}
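/*
 * thread_suspend_one() and thread_unsuspend_one() must be called with
 * sched_lock held; together they keep p_suspcount equal to the number
 * of threads on the p_suspended queue.
 */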
void
thread_suspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        mtx_assert(&sched_lock, MA_OWNED);
        p->p_suspcount++;
        TD_SET_SUSPENDED(td);
        TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
        /*
         * Hack: If we are suspending but are on the sleep queue
         * then we are in msleep or the cv equivalent. We
         * want to look like we have two Inhibitors.
         */
        if (TD_ON_SLEEPQ(td))
                TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
        struct proc *p = td->td_proc;

        mtx_assert(&sched_lock, MA_OWNED);
        TAILQ_REMOVE(&p->p_suspended, td, td_runq);
        TD_CLR_SUSPENDED(td);
        p->p_suspcount--;
        setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
        struct thread *td;

        mtx_assert(&sched_lock, MA_OWNED);
        PROC_LOCK_ASSERT(p, MA_OWNED);
        if (!P_SHOULDSTOP(p)) {
                while ((td = TAILQ_FIRST(&p->p_suspended))) {
                        thread_unsuspend_one(td);
                }
        } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
            (p->p_numthreads == p->p_suspcount)) {
                /*
                 * Stopping everything also did the job for the single
                 * threading request. Now we've downgraded to single-threaded,
                 * let it continue.
                 */
                thread_unsuspend_one(p->p_singlethread);
        }
}
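/*
 * Unlike thread_unsuspend() above, which expects both the proc lock
 * and sched_lock to be held already, thread_single_end() below takes
 * sched_lock itself around the release of any still-suspended threads.
 */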
void
thread_single_end(void)
{
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        p->p_flag &= ~P_STOPPED_SINGLE;
        p->p_singlethread = NULL;
        /*
         * If there are other threads they may now run,
         * unless of course there is a blanket 'stop order'
         * on the process. The single threader must be allowed
         * to continue however as this is a bad place to stop.
         */
        if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
                mtx_lock_spin(&sched_lock);
                while ((td = TAILQ_FIRST(&p->p_suspended))) {
                        thread_unsuspend_one(td);
                }
                mtx_unlock_spin(&sched_lock);
        }
}