/*-
 * kern_thread.c (FreeBSD head, revision 170598)
 *
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
2799026Sjulian */ 2899026Sjulian 29116182Sobrien#include <sys/cdefs.h> 30116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 170598 2007-06-12 07:24:46Z jeff $"); 31116182Sobrien 3299026Sjulian#include <sys/param.h> 3399026Sjulian#include <sys/systm.h> 3499026Sjulian#include <sys/kernel.h> 3599026Sjulian#include <sys/lock.h> 3699026Sjulian#include <sys/mutex.h> 3799026Sjulian#include <sys/proc.h> 38156705Sdavidxu#include <sys/resourcevar.h> 39130355Sjulian#include <sys/smp.h> 4099026Sjulian#include <sys/sysctl.h> 41107126Sjeff#include <sys/sched.h> 42126326Sjhb#include <sys/sleepqueue.h> 43122514Sjhb#include <sys/turnstile.h> 4499026Sjulian#include <sys/ktr.h> 45143149Sdavidxu#include <sys/umtx.h> 4699026Sjulian 47155195Srwatson#include <security/audit/audit.h> 48155195Srwatson 4999026Sjulian#include <vm/vm.h> 50116355Salc#include <vm/vm_extern.h> 5199026Sjulian#include <vm/uma.h> 5299026Sjulian 5399026Sjulian/* 54163709Sjb * thread related storage. 55163709Sjb */ 5699026Sjulianstatic uma_zone_t thread_zone; 5799026Sjulian 5899026SjulianSYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); 5999026Sjulian 60130199Sjulianint max_threads_per_proc = 1500; 61107006SdavidxuSYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW, 62103367Sjulian &max_threads_per_proc, 0, "Limit on threads per proc"); 63103367Sjulian 64130199Sjulianint max_threads_hits; 65111115SdavidxuSYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD, 66111115Sdavidxu &max_threads_hits, 0, ""); 67111115Sdavidxu 68163709Sjb#ifdef KSE 69130355Sjulianint virtual_cpu; 70111028Sjeff 71163709Sjb#endif 72111028SjeffTAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); 73170296Sjeffstruct mtx zombie_lock; 74170296SjeffMTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN); 7599026Sjulian 76170598Sjeffstatic void thread_zombie(struct thread *); 77170598Sjeff 78163709Sjb#ifdef KSE 79130355Sjulianstatic int 
80130355Sjuliansysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS) 81130355Sjulian{ 82130355Sjulian int error, new_val; 83130355Sjulian int def_val; 84111028Sjeff 85130355Sjulian def_val = mp_ncpus; 86130355Sjulian if (virtual_cpu == 0) 87130355Sjulian new_val = def_val; 88130355Sjulian else 89130355Sjulian new_val = virtual_cpu; 90130355Sjulian error = sysctl_handle_int(oidp, &new_val, 0, req); 91133713Sjulian if (error != 0 || req->newptr == NULL) 92130355Sjulian return (error); 93130355Sjulian if (new_val < 0) 94130355Sjulian return (EINVAL); 95130355Sjulian virtual_cpu = new_val; 96130355Sjulian return (0); 97130355Sjulian} 98130355Sjulian 99130355Sjulian/* DEBUG ONLY */ 100130355SjulianSYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW, 101130355Sjulian 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I", 102130355Sjulian "debug virtual cpus"); 103163709Sjb#endif 104130355Sjulian 105127794Smarcelstruct mtx tid_lock; 106143802Sphkstatic struct unrhdr *tid_unrhdr; 107127794Smarcel 108127794Smarcel/* 109107719Sjulian * Prepare a thread for use. 11099026Sjulian */ 111132987Sgreenstatic int 112132987Sgreenthread_ctor(void *mem, int size, void *arg, int flags) 11399026Sjulian{ 11499026Sjulian struct thread *td; 11599026Sjulian 11699026Sjulian td = (struct thread *)mem; 117103216Sjulian td->td_state = TDS_INACTIVE; 118135573Sjhb td->td_oncpu = NOCPU; 119130269Sjmallett 120143840Sphk td->td_tid = alloc_unr(tid_unrhdr); 121167352Smohans td->td_syscalls = 0; 122143840Sphk 123130269Sjmallett /* 124130269Sjmallett * Note that td_critnest begins life as 1 because the thread is not 125130269Sjmallett * running and is thereby implicitly waiting to be on the receiving 126170296Sjeff * end of a context switch. 
127130269Sjmallett */ 128118442Sjhb td->td_critnest = 1; 129155195Srwatson 130155195Srwatson#ifdef AUDIT 131155195Srwatson audit_thread_alloc(td); 132155195Srwatson#endif 133161678Sdavidxu umtx_thread_alloc(td); 134132987Sgreen return (0); 13599026Sjulian} 13699026Sjulian 13799026Sjulian/* 13899026Sjulian * Reclaim a thread after use. 13999026Sjulian */ 14099026Sjulianstatic void 14199026Sjulianthread_dtor(void *mem, int size, void *arg) 14299026Sjulian{ 143127794Smarcel struct thread *td; 14499026Sjulian 14599026Sjulian td = (struct thread *)mem; 14699026Sjulian 14799026Sjulian#ifdef INVARIANTS 14899026Sjulian /* Verify that this thread is in a safe state to free. */ 14999026Sjulian switch (td->td_state) { 150103216Sjulian case TDS_INHIBITED: 151103216Sjulian case TDS_RUNNING: 152103216Sjulian case TDS_CAN_RUN: 15399026Sjulian case TDS_RUNQ: 15499026Sjulian /* 15599026Sjulian * We must never unlink a thread that is in one of 15699026Sjulian * these states, because it is currently active. 15799026Sjulian */ 15899026Sjulian panic("bad state for thread unlinking"); 15999026Sjulian /* NOTREACHED */ 160103216Sjulian case TDS_INACTIVE: 16199026Sjulian break; 16299026Sjulian default: 16399026Sjulian panic("bad thread state"); 16499026Sjulian /* NOTREACHED */ 16599026Sjulian } 16699026Sjulian#endif 167155353Srwatson#ifdef AUDIT 168155353Srwatson audit_thread_free(td); 169155353Srwatson#endif 170143840Sphk free_unr(tid_unrhdr, td->td_tid); 171134791Sjulian sched_newthread(td); 17299026Sjulian} 17399026Sjulian 17499026Sjulian/* 17599026Sjulian * Initialize type-stable parts of a thread (when newly created). 
17699026Sjulian */ 177132987Sgreenstatic int 178132987Sgreenthread_init(void *mem, int size, int flags) 17999026Sjulian{ 180131149Smarcel struct thread *td; 18199026Sjulian 18299026Sjulian td = (struct thread *)mem; 183131149Smarcel 184116355Salc vm_thread_new(td, 0); 18599026Sjulian cpu_thread_setup(td); 186126326Sjhb td->td_sleepqueue = sleepq_alloc(); 187122514Sjhb td->td_turnstile = turnstile_alloc(); 188107126Sjeff td->td_sched = (struct td_sched *)&td[1]; 189134791Sjulian sched_newthread(td); 190161678Sdavidxu umtx_thread_init(td); 191132987Sgreen return (0); 19299026Sjulian} 19399026Sjulian 19499026Sjulian/* 19599026Sjulian * Tear down type-stable parts of a thread (just before being discarded). 19699026Sjulian */ 19799026Sjulianstatic void 19899026Sjulianthread_fini(void *mem, int size) 19999026Sjulian{ 200131149Smarcel struct thread *td; 20199026Sjulian 20299026Sjulian td = (struct thread *)mem; 203122514Sjhb turnstile_free(td->td_turnstile); 204126326Sjhb sleepq_free(td->td_sleepqueue); 205161678Sdavidxu umtx_thread_fini(td); 206116355Salc vm_thread_dispose(td); 20799026Sjulian} 208111028Sjeff 209107126Sjeff/* 210111028Sjeff * For a newly created process, 211111028Sjeff * link up all the structures and its initial threads etc. 212134791Sjulian * called from: 213134791Sjulian * {arch}/{arch}/machdep.c ia64_init(), init386() etc. 
214134791Sjulian * proc_dtor() (should go away) 215134791Sjulian * proc_init() 216105854Sjulian */ 217105854Sjulianvoid 218163709Sjbproc_linkup(struct proc *p, struct thread *td) 219105854Sjulian{ 220170296Sjeff 221105854Sjulian TAILQ_INIT(&p->p_threads); /* all threads in proc */ 222164936Sjulian TAILQ_INIT(&p->p_upcalls); /* upcall list */ 223151316Sdavidxu sigqueue_init(&p->p_sigqueue, p); 224153253Sdavidxu p->p_ksi = ksiginfo_alloc(1); 225153253Sdavidxu if (p->p_ksi != NULL) { 226153253Sdavidxu /* XXX p_ksi may be null if ksiginfo zone is not ready */ 227153253Sdavidxu p->p_ksi->ksi_flags = KSI_EXT | KSI_INS; 228152185Sdavidxu } 229152948Sdavidxu LIST_INIT(&p->p_mqnotifier); 230105854Sjulian p->p_numthreads = 0; 231163709Sjb thread_link(td, p); 232105854Sjulian} 233105854Sjulian 234111028Sjeff/* 23599026Sjulian * Initialize global thread allocation resources. 23699026Sjulian */ 23799026Sjulianvoid 23899026Sjulianthreadinit(void) 23999026Sjulian{ 24099026Sjulian 241143802Sphk mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF); 242143802Sphk tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock); 243143802Sphk 244107126Sjeff thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), 24599026Sjulian thread_ctor, thread_dtor, thread_init, thread_fini, 246167944Sjhb 16 - 1, 0); 247163709Sjb#ifdef KSE 248134791Sjulian kseinit(); /* set up kse specific stuff e.g. upcall zone*/ 249163709Sjb#endif 25099026Sjulian} 25199026Sjulian 25299026Sjulian/* 253170598Sjeff * Place an unused thread on the zombie list. 254164936Sjulian * Use the slpq as that must be unused by now. 25599026Sjulian */ 25699026Sjulianvoid 257170598Sjeffthread_zombie(struct thread *td) 25899026Sjulian{ 259170296Sjeff mtx_lock_spin(&zombie_lock); 260164936Sjulian TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq); 261170296Sjeff mtx_unlock_spin(&zombie_lock); 26299026Sjulian} 26399026Sjulian 264103410Smini/* 265170598Sjeff * Release a thread that has exited after cpu_throw(). 
266170598Sjeff */ 267170598Sjeffvoid 268170598Sjeffthread_stash(struct thread *td) 269170598Sjeff{ 270170598Sjeff atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); 271170598Sjeff thread_zombie(td); 272170598Sjeff} 273170598Sjeff 274170598Sjeff/* 275111028Sjeff * Reap zombie kse resource. 27699026Sjulian */ 27799026Sjulianvoid 27899026Sjulianthread_reap(void) 27999026Sjulian{ 280105854Sjulian struct thread *td_first, *td_next; 28199026Sjulian 28299026Sjulian /* 283111028Sjeff * Don't even bother to lock if none at this instant, 284111028Sjeff * we really don't care about the next instant.. 28599026Sjulian */ 286163709Sjb if (!TAILQ_EMPTY(&zombie_threads)) { 287170296Sjeff mtx_lock_spin(&zombie_lock); 288105854Sjulian td_first = TAILQ_FIRST(&zombie_threads); 289105854Sjulian if (td_first) 290105854Sjulian TAILQ_INIT(&zombie_threads); 291170296Sjeff mtx_unlock_spin(&zombie_lock); 292105854Sjulian while (td_first) { 293164936Sjulian td_next = TAILQ_NEXT(td_first, td_slpq); 294111028Sjeff if (td_first->td_ucred) 295111028Sjeff crfree(td_first->td_ucred); 296105854Sjulian thread_free(td_first); 297105854Sjulian td_first = td_next; 29899026Sjulian } 29999026Sjulian } 30099026Sjulian} 30199026Sjulian 30299026Sjulian/* 30399026Sjulian * Allocate a thread. 30499026Sjulian */ 30599026Sjulianstruct thread * 30699026Sjulianthread_alloc(void) 30799026Sjulian{ 308163709Sjb 30999026Sjulian thread_reap(); /* check if any zombies to get */ 310111119Simp return (uma_zalloc(thread_zone, M_WAITOK)); 31199026Sjulian} 31299026Sjulian 313103367Sjulian 314103367Sjulian/* 31599026Sjulian * Deallocate a thread. 31699026Sjulian */ 31799026Sjulianvoid 31899026Sjulianthread_free(struct thread *td) 31999026Sjulian{ 320107719Sjulian 321107719Sjulian cpu_thread_clean(td); 32299026Sjulian uma_zfree(thread_zone, td); 32399026Sjulian} 32499026Sjulian 32599026Sjulian/* 32699026Sjulian * Discard the current thread and exit from its context. 327130355Sjulian * Always called with scheduler locked. 
32899026Sjulian * 32999026Sjulian * Because we can't free a thread while we're operating under its context, 330107719Sjulian * push the current thread into our CPU's deadthread holder. This means 331107719Sjulian * we needn't worry about someone else grabbing our context before we 332130355Sjulian * do a cpu_throw(). This may not be needed now as we are under schedlock. 333130355Sjulian * Maybe we can just do a thread_stash() as thr_exit1 does. 33499026Sjulian */ 335130355Sjulian/* XXX 336130355Sjulian * libthr expects its thread exit to return for the last 337130355Sjulian * thread, meaning that the program is back to non-threaded 338130355Sjulian * mode I guess. Because we do this (cpu_throw) unconditionally 339130355Sjulian * here, they have their own version of it. (thr_exit1()) 340130355Sjulian * that doesn't do it all if this was the last thread. 341130355Sjulian * It is also called from thread_suspend_check(). 342130355Sjulian * Of course in the end, they end up coming here through exit1 343130355Sjulian * anyhow.. After fixing 'thr' to play by the rules we should be able 344130355Sjulian * to merge these two functions together. 
345134791Sjulian * 346134791Sjulian * called from: 347134791Sjulian * exit1() 348134791Sjulian * kse_exit() 349134791Sjulian * thr_exit() 350163709Sjb * ifdef KSE 351134791Sjulian * thread_user_enter() 352134791Sjulian * thread_userret() 353163709Sjb * endif 354134791Sjulian * thread_suspend_check() 355130355Sjulian */ 35699026Sjulianvoid 35799026Sjulianthread_exit(void) 35899026Sjulian{ 359156705Sdavidxu uint64_t new_switchtime; 36099026Sjulian struct thread *td; 361170174Sjeff struct thread *td2; 36299026Sjulian struct proc *p; 36399026Sjulian 36499026Sjulian td = curthread; 36599026Sjulian p = td->td_proc; 36699026Sjulian 367170296Sjeff PROC_SLOCK_ASSERT(p, MA_OWNED); 368134791Sjulian mtx_assert(&Giant, MA_NOTOWNED); 369170296Sjeff 370134791Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 371102581Sjulian KASSERT(p != NULL, ("thread exiting without a process")); 372133234Srwatson CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td, 373133234Srwatson (long)p->p_pid, p->p_comm); 374151316Sdavidxu KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending")); 37599026Sjulian 376155376Srwatson#ifdef AUDIT 377155376Srwatson AUDIT_SYSCALL_EXIT(0, td); 378155376Srwatson#endif 379155376Srwatson 380163709Sjb#ifdef KSE 381104695Sjulian if (td->td_standin != NULL) { 382134791Sjulian /* 383134791Sjulian * Note that we don't need to free the cred here as it 384134791Sjulian * is done in thread_reap(). 385134791Sjulian */ 386170598Sjeff thread_zombie(td->td_standin); 387104695Sjulian td->td_standin = NULL; 388104695Sjulian } 389163709Sjb#endif 390104695Sjulian 391161678Sdavidxu umtx_thread_exit(td); 392161678Sdavidxu 393134791Sjulian /* 394134791Sjulian * drop FPU & debug register state storage, or any other 395134791Sjulian * architecture specific resources that 396134791Sjulian * would not be on a new untouched process. 
397134791Sjulian */ 39899026Sjulian cpu_thread_exit(td); /* XXXSMP */ 39999026Sjulian 400156705Sdavidxu /* Do the same timestamp bookkeeping that mi_switch() would do. */ 401156705Sdavidxu new_switchtime = cpu_ticks(); 402156705Sdavidxu p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime)); 403156705Sdavidxu PCPU_SET(switchtime, new_switchtime); 404156705Sdavidxu PCPU_SET(switchticks, ticks); 405170292Sattilio PCPU_INC(cnt.v_swtch); 406170466Sattilio /* Save our resource usage in our process. */ 407170466Sattilio td->td_ru.ru_nvcsw++; 408170466Sattilio rucollect(&p->p_ru, &td->td_ru); 409134791Sjulian /* 410103002Sjulian * The last thread is left attached to the process 411103002Sjulian * So that the whole bundle gets recycled. Skip 412134791Sjulian * all this stuff if we never had threads. 413134791Sjulian * EXIT clears all sign of other threads when 414134791Sjulian * it goes to single threading, so the last thread always 415134791Sjulian * takes the short path. 416102581Sjulian */ 417134791Sjulian if (p->p_flag & P_HADTHREADS) { 418134791Sjulian if (p->p_numthreads > 1) { 419170296Sjeff thread_lock(td); 420170296Sjeff#ifdef KSE 421170296Sjeff kse_unlink(td); 422170296Sjeff#else 423134791Sjulian thread_unlink(td); 424170296Sjeff#endif 425170296Sjeff thread_unlock(td); 426170174Sjeff td2 = FIRST_THREAD_IN_PROC(p); 427170174Sjeff sched_exit_thread(td2, td); 428134791Sjulian 429134791Sjulian /* 430134791Sjulian * The test below is NOT true if we are the 431134791Sjulian * sole exiting thread. P_STOPPED_SNGL is unset 432134791Sjulian * in exit1() after it is the only survivor. 
433134791Sjulian */ 434134791Sjulian if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 435134791Sjulian if (p->p_numthreads == p->p_suspcount) { 436170296Sjeff thread_lock(p->p_singlethread); 437134791Sjulian thread_unsuspend_one(p->p_singlethread); 438170296Sjeff thread_unlock(p->p_singlethread); 439134791Sjulian } 440103002Sjulian } 441104695Sjulian 442163709Sjb#ifdef KSE 443134791Sjulian /* 444134791Sjulian * Because each upcall structure has an owner thread, 445134791Sjulian * owner thread exits only when process is in exiting 446134791Sjulian * state, so upcall to userland is no longer needed, 447134791Sjulian * deleting upcall structure is safe here. 448134791Sjulian * So when all threads in a group is exited, all upcalls 449134791Sjulian * in the group should be automatically freed. 450134791Sjulian * XXXKSE This is a KSE thing and should be exported 451134791Sjulian * there somehow. 452134791Sjulian */ 453111028Sjeff upcall_remove(td); 454164936Sjulian#endif 455170598Sjeff atomic_add_int(&td->td_proc->p_exitthreads, 1); 456134791Sjulian PCPU_SET(deadthread, td); 457134791Sjulian } else { 458134791Sjulian /* 459134791Sjulian * The last thread is exiting.. but not through exit() 460134791Sjulian * what should we do? 
461134791Sjulian * Theoretically this can't happen 462134791Sjulian * exit1() - clears threading flags before coming here 463134791Sjulian * kse_exit() - treats last thread specially 464134791Sjulian * thr_exit() - treats last thread specially 465163709Sjb * ifdef KSE 466134791Sjulian * thread_user_enter() - only if more exist 467134791Sjulian * thread_userret() - only if more exist 468163709Sjb * endif 469134791Sjulian * thread_suspend_check() - only if more exist 470134791Sjulian */ 471134791Sjulian panic ("thread_exit: Last thread exiting on its own"); 472119488Sdavidxu } 473170296Sjeff } 474170296Sjeff PROC_UNLOCK(p); 475170296Sjeff thread_lock(td); 476170466Sattilio /* Save our tick information with both the thread and proc locked */ 477170296Sjeff ruxagg(&p->p_rux, td); 478170296Sjeff PROC_SUNLOCK(p); 479133396Sjulian td->td_state = TDS_INACTIVE; 480133396Sjulian CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td); 481170296Sjeff sched_throw(td); 482112993Speter panic("I'm a teapot!"); 48399026Sjulian /* NOTREACHED */ 48499026Sjulian} 48599026Sjulian 486124350Sschweikh/* 487107719Sjulian * Do any thread specific cleanups that may be needed in wait() 488126932Speter * called with Giant, proc and schedlock not held. 
489107719Sjulian */ 490107719Sjulianvoid 491107719Sjulianthread_wait(struct proc *p) 492107719Sjulian{ 493107719Sjulian struct thread *td; 494107719Sjulian 495126932Speter mtx_assert(&Giant, MA_NOTOWNED); 496124350Sschweikh KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()")); 497170598Sjeff td = FIRST_THREAD_IN_PROC(p); 498163709Sjb#ifdef KSE 499170598Sjeff if (td->td_standin != NULL) { 500170598Sjeff if (td->td_standin->td_ucred != NULL) { 501170598Sjeff crfree(td->td_standin->td_ucred); 502170598Sjeff td->td_standin->td_ucred = NULL; 503107719Sjulian } 504170598Sjeff thread_free(td->td_standin); 505170598Sjeff td->td_standin = NULL; 506170598Sjeff } 507163709Sjb#endif 508170598Sjeff /* Lock the last thread so we spin until it exits cpu_throw(). */ 509170598Sjeff thread_lock(td); 510170598Sjeff thread_unlock(td); 511170598Sjeff /* Wait for any remaining threads to exit cpu_throw(). */ 512170598Sjeff while (p->p_exitthreads) 513170598Sjeff sched_relinquish(curthread); 514170598Sjeff cpu_thread_clean(td); 515170598Sjeff crfree(td->td_ucred); 516107719Sjulian thread_reap(); /* check for zombie threads etc. */ 517107719Sjulian} 518107719Sjulian 51999026Sjulian/* 52099026Sjulian * Link a thread to a process. 521103002Sjulian * set up anything that needs to be initialized for it to 522103002Sjulian * be used by the process. 52399026Sjulian * 52499026Sjulian * Note that we do not link to the proc's ucred here. 52599026Sjulian * The thread is linked as if running but no KSE assigned. 526134791Sjulian * Called from: 527134791Sjulian * proc_linkup() 528134791Sjulian * thread_schedule_upcall() 529134791Sjulian * thr_create() 53099026Sjulian */ 53199026Sjulianvoid 532163709Sjbthread_link(struct thread *td, struct proc *p) 53399026Sjulian{ 53499026Sjulian 535170296Sjeff /* 536170296Sjeff * XXX This can't be enabled because it's called for proc0 before 537170296Sjeff * it's spinlock has been created. 
538170296Sjeff * PROC_SLOCK_ASSERT(p, MA_OWNED); 539170296Sjeff */ 540111028Sjeff td->td_state = TDS_INACTIVE; 541111028Sjeff td->td_proc = p; 542111028Sjeff td->td_flags = 0; 54399026Sjulian 544103002Sjulian LIST_INIT(&td->td_contested); 545151316Sdavidxu sigqueue_init(&td->td_sigqueue, p); 546119137Ssam callout_init(&td->td_slpcallout, CALLOUT_MPSAFE); 54799026Sjulian TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist); 54899026Sjulian p->p_numthreads++; 54999026Sjulian} 55099026Sjulian 551134791Sjulian/* 552136160Sjulian * Convert a process with one thread to an unthreaded process. 553134791Sjulian * Called from: 554136160Sjulian * thread_single(exit) (called from execve and exit) 555136160Sjulian * kse_exit() XXX may need cleaning up wrt KSE stuff 556136160Sjulian */ 557136160Sjulianvoid 558136160Sjulianthread_unthread(struct thread *td) 559136160Sjulian{ 560136160Sjulian struct proc *p = td->td_proc; 561136160Sjulian 562136160Sjulian KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads")); 563163709Sjb#ifdef KSE 564136160Sjulian upcall_remove(td); 565136160Sjulian p->p_flag &= ~(P_SA|P_HADTHREADS); 566136160Sjulian td->td_mailbox = NULL; 567136160Sjulian td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND); 568136160Sjulian if (td->td_standin != NULL) { 569170598Sjeff thread_zombie(td->td_standin); 570136160Sjulian td->td_standin = NULL; 571136160Sjulian } 572164936Sjulian sched_set_concurrency(p, 1); 573163709Sjb#else 574163709Sjb p->p_flag &= ~P_HADTHREADS; 575163709Sjb#endif 576136160Sjulian} 577136160Sjulian 578136160Sjulian/* 579136160Sjulian * Called from: 580134791Sjulian * thread_exit() 581134791Sjulian */ 582113641Sjulianvoid 583113641Sjulianthread_unlink(struct thread *td) 584124350Sschweikh{ 585113641Sjulian struct proc *p = td->td_proc; 586113920Sjhb 587170296Sjeff PROC_SLOCK_ASSERT(p, MA_OWNED); 588113641Sjulian TAILQ_REMOVE(&p->p_threads, td, td_plist); 589113641Sjulian p->p_numthreads--; 590113641Sjulian /* could clear a few other things here */ 
591163709Sjb /* Must NOT clear links to proc! */ 592124350Sschweikh} 593113641Sjulian 594111028Sjeff/* 59599026Sjulian * Enforce single-threading. 59699026Sjulian * 59799026Sjulian * Returns 1 if the caller must abort (another thread is waiting to 59899026Sjulian * exit the process or similar). Process is locked! 59999026Sjulian * Returns 0 when you are successfully the only thread running. 60099026Sjulian * A process has successfully single threaded in the suspend mode when 60199026Sjulian * There are no threads in user mode. Threads in the kernel must be 60299026Sjulian * allowed to continue until they get to the user boundary. They may even 60399026Sjulian * copy out their return values and data before suspending. They may however be 604160048Smaxim * accelerated in reaching the user boundary as we will wake up 60599026Sjulian * any sleeping threads that are interruptable. (PCATCH). 60699026Sjulian */ 60799026Sjulianint 608136177Sdavidxuthread_single(int mode) 60999026Sjulian{ 61099026Sjulian struct thread *td; 61199026Sjulian struct thread *td2; 61299026Sjulian struct proc *p; 613130674Sdavidxu int remaining; 61499026Sjulian 61599026Sjulian td = curthread; 61699026Sjulian p = td->td_proc; 617126932Speter mtx_assert(&Giant, MA_NOTOWNED); 61899026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 61999026Sjulian KASSERT((td != NULL), ("curthread is NULL")); 62099026Sjulian 621134791Sjulian if ((p->p_flag & P_HADTHREADS) == 0) 62299026Sjulian return (0); 62399026Sjulian 624100648Sjulian /* Is someone already single threading? 
*/ 625136177Sdavidxu if (p->p_singlethread != NULL && p->p_singlethread != td) 62699026Sjulian return (1); 62799026Sjulian 628136177Sdavidxu if (mode == SINGLE_EXIT) { 629136177Sdavidxu p->p_flag |= P_SINGLE_EXIT; 630136177Sdavidxu p->p_flag &= ~P_SINGLE_BOUNDARY; 631136177Sdavidxu } else { 632136177Sdavidxu p->p_flag &= ~P_SINGLE_EXIT; 633136177Sdavidxu if (mode == SINGLE_BOUNDARY) 634136177Sdavidxu p->p_flag |= P_SINGLE_BOUNDARY; 635136177Sdavidxu else 636136177Sdavidxu p->p_flag &= ~P_SINGLE_BOUNDARY; 637136177Sdavidxu } 638102950Sdavidxu p->p_flag |= P_STOPPED_SINGLE; 639170296Sjeff PROC_SLOCK(p); 64099026Sjulian p->p_singlethread = td; 641136177Sdavidxu if (mode == SINGLE_EXIT) 642130674Sdavidxu remaining = p->p_numthreads; 643136177Sdavidxu else if (mode == SINGLE_BOUNDARY) 644136177Sdavidxu remaining = p->p_numthreads - p->p_boundary_count; 645136177Sdavidxu else 646130674Sdavidxu remaining = p->p_numthreads - p->p_suspcount; 647130674Sdavidxu while (remaining != 1) { 648156942Sdavidxu if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) 649156942Sdavidxu goto stopme; 65099026Sjulian FOREACH_THREAD_IN_PROC(p, td2) { 65199026Sjulian if (td2 == td) 65299026Sjulian continue; 653170296Sjeff thread_lock(td2); 654113705Sdavidxu td2->td_flags |= TDF_ASTPENDING; 655103216Sjulian if (TD_IS_INHIBITED(td2)) { 656136177Sdavidxu switch (mode) { 657136177Sdavidxu case SINGLE_EXIT: 658132087Sdavidxu if (td->td_flags & TDF_DBSUSPEND) 659132087Sdavidxu td->td_flags &= ~TDF_DBSUSPEND; 660136177Sdavidxu if (TD_IS_SUSPENDED(td2)) 661103216Sjulian thread_unsuspend_one(td2); 662105911Sjulian if (TD_ON_SLEEPQ(td2) && 663136177Sdavidxu (td2->td_flags & TDF_SINTR)) 664155741Sdavidxu sleepq_abort(td2, EINTR); 665136177Sdavidxu break; 666136177Sdavidxu case SINGLE_BOUNDARY: 667136177Sdavidxu if (TD_IS_SUSPENDED(td2) && 668136177Sdavidxu !(td2->td_flags & TDF_BOUNDARY)) 669136177Sdavidxu thread_unsuspend_one(td2); 670136177Sdavidxu if (TD_ON_SLEEPQ(td2) && 671136177Sdavidxu (td2->td_flags & 
TDF_SINTR)) 672155741Sdavidxu sleepq_abort(td2, ERESTART); 673136177Sdavidxu break; 674136177Sdavidxu default: 675170296Sjeff if (TD_IS_SUSPENDED(td2)) { 676170296Sjeff thread_unlock(td2); 677105874Sdavidxu continue; 678170296Sjeff } 679111028Sjeff /* 680165693Srwatson * maybe other inhibited states too? 681111028Sjeff */ 682137281Sdavidxu if ((td2->td_flags & TDF_SINTR) && 683137281Sdavidxu (td2->td_inhibitors & 684137281Sdavidxu (TDI_SLEEPING | TDI_SWAPPED))) 685105911Sjulian thread_suspend_one(td2); 686136177Sdavidxu break; 68799026Sjulian } 68899026Sjulian } 689155594Sdavidxu#ifdef SMP 690155594Sdavidxu else if (TD_IS_RUNNING(td2) && td != td2) { 691155594Sdavidxu forward_signal(td2); 692155594Sdavidxu } 693155594Sdavidxu#endif 694170296Sjeff thread_unlock(td2); 69599026Sjulian } 696136177Sdavidxu if (mode == SINGLE_EXIT) 697130674Sdavidxu remaining = p->p_numthreads; 698136177Sdavidxu else if (mode == SINGLE_BOUNDARY) 699136177Sdavidxu remaining = p->p_numthreads - p->p_boundary_count; 700130674Sdavidxu else 701130674Sdavidxu remaining = p->p_numthreads - p->p_suspcount; 702130674Sdavidxu 703124350Sschweikh /* 704124350Sschweikh * Maybe we suspended some threads.. was it enough? 705105911Sjulian */ 706130674Sdavidxu if (remaining == 1) 707105911Sjulian break; 708105911Sjulian 709156942Sdavidxustopme: 71099026Sjulian /* 71199026Sjulian * Wake us up when everyone else has suspended. 712100648Sjulian * In the mean time we suspend as well. 
 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |  immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		/*
		 * If we are the last thread to suspend during a
		 * single-threading request, wake up the requesting thread.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}

/*
 * Suspend the current thread and switch away, as part of servicing a
 * single-threading request.  Called with both the proc lock and the
 * proc spinlock held (asserted below); both are held again on return.
 */
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	TD_SET_SUSPENDED(td);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

/*
 * Mark a thread as suspended and account for it in the process
 * suspension count.  Caller must hold the proc spinlock and the
 * thread lock (asserted); the thread must not already be suspended.
 */
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
}

/*
 * Clear a thread's suspended state, drop the process suspension count
 * and make the thread runnable again.  Caller must hold the proc
 * spinlock and the thread lock (asserted); the thread must currently
 * be suspended.
 */
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		/* No stop is in effect: release every suspended thread. */
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
}

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
}

/*
 * Look up a thread in process p by its thread id.  Caller must hold
 * the proc lock (asserted).  Returns the matching thread, or NULL if
 * no thread in p has that tid (the FOREACH iterator leaves td NULL
 * when the list is exhausted).
 */
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	PROC_SUNLOCK(p);
	return (td);
}