kern_thread.c revision 170296
1139804Simp/*- 299026Sjulian * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>. 399026Sjulian * All rights reserved. 499026Sjulian * 599026Sjulian * Redistribution and use in source and binary forms, with or without 699026Sjulian * modification, are permitted provided that the following conditions 799026Sjulian * are met: 899026Sjulian * 1. Redistributions of source code must retain the above copyright 999026Sjulian * notice(s), this list of conditions and the following disclaimer as 10124350Sschweikh * the first lines of this file unmodified other than the possible 1199026Sjulian * addition of one or more copyright notices. 1299026Sjulian * 2. Redistributions in binary form must reproduce the above copyright 1399026Sjulian * notice(s), this list of conditions and the following disclaimer in the 1499026Sjulian * documentation and/or other materials provided with the distribution. 1599026Sjulian * 1699026Sjulian * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 1799026Sjulian * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 1899026Sjulian * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 1999026Sjulian * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 2099026Sjulian * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 2199026Sjulian * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 2299026Sjulian * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 2399026Sjulian * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2499026Sjulian * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2599026Sjulian * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 2699026Sjulian * DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 170296 2007-06-04 23:52:24Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * Thread related storage: the UMA zone all struct thread allocations
 * come from.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

/* Upper bound on the number of threads a single process may create. */
int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

/* Read-only counter; presumably bumped when the per-proc limit is hit —
 * the increment site is not in this file, so confirm against callers. */
int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

#ifdef KSE
int virtual_cpu;

#endif
/* Exited threads are parked on this list until thread_reap() frees them. */
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
/* Spin mutex protecting zombie_threads; set up at boot via SYSINIT. */
struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

#ifdef KSE
/*
 * Sysctl handler for kern.threads.virtual_cpu.  Reports mp_ncpus while the
 * override is unset (zero) and accepts any non-negative replacement value.
 */
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");
#endif

/* Protects the TID unit-number allocator below. */
struct mtx tid_lock;
/* Unit-number allocator handing out system-wide unique thread IDs. */
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 *
 * UMA constructor: runs every time a thread is handed out from
 * thread_zone, including reuse of a cached item.  Always returns 0.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	/* Every thread gets a TID unique across the system. */
	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;

#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 *
 * UMA destructor: runs when a thread is returned to the zone.  The TID is
 * released here so it may be recycled by a later allocation.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	free_unr(tid_unrhdr, td->td_tid);
	/* Reset scheduler-private state for the next user of this memory. */
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	/* Kernel stack plus machine-dependent setup for a fresh thread. */
	vm_thread_new(td, 0);
	cpu_thread_setup(td);
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	/* Scheduler-private data lives immediately after the thread itself
	 * (zone items are sized by sched_sizeof_thread()). */
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	umtx_thread_init(td);
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 * Releases everything thread_init() allocated, in reverse order.
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	vm_thread_dispose(td);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup(struct proc *p, struct thread *td)
{

	TAILQ_INIT(&p->p_threads);	     /* all threads in proc */
	TAILQ_INIT(&p->p_upcalls);	     /* upcall list */
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* TIDs start above PID_MAX so they never collide with process IDs. */
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
#ifdef KSE
	kseinit();	/* set up kse specific stuff  e.g. upcall zone*/
#endif
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 * Use the slpq as that must be unused by now.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Reap zombie threads (and their KSE resources).
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		/* Detach the whole list under the spin lock, then free the
		 * entries with the lock dropped. */
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 * May sleep (M_WAITOK); reaps any pending zombies first.
 */
struct thread *
thread_alloc(void)
{

	thread_reap(); /* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}


/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.
This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/*  XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode I guess. Because we do this (cpu_throw) unconditionally
 * here, they have their own version of it. (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1
 * anyhow..  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * called from:
 * exit1()
 * kse_exit()
 * thr_exit()
 * ifdef KSE
 * thread_user_enter()
 * thread_userret()
 * endif
 * thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;

	/* Caller must hold both the proc lock and the proc spinlock. */
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif

#ifdef KSE
	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
#endif

	umtx_thread_exit(td);

	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Add the child usage to our own when the final thread exits. */
	if (p->p_numthreads == 1)
		ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);
	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			/* Detach from the process' thread list. */
			thread_lock(td);
#ifdef KSE
			kse_unlink(td);
#else
			thread_unlink(td);
#endif
			thread_unlock(td);
			/* Impart our resource usage on another thread */
			td2 = FIRST_THREAD_IN_PROC(p);
			rucollect(&td2->td_ru, &td->td_ru);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SNGL is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					/*
					 * We were the last obstacle; wake
					 * the single-threading thread.
					 */
					thread_lock(p->p_singlethread);
					thread_unsuspend_one(p->p_singlethread);
					thread_unlock(p->p_singlethread);
				}
			}

#ifdef KSE
			/*
			 * Because each upcall structure has an owner thread,
			 * owner thread exits only when process is in exiting
			 * state, so upcall to userland is no longer needed,
			 * deleting upcall structure is safe here.
			 * So when all threads in a group is exited, all upcalls
			 * in the group should be automatically freed.
			 *  XXXKSE This is a KSE thing and should be exported
			 * there somehow.
			 */
			upcall_remove(td);
#endif
			/* Let the next switching CPU reclaim our storage. */
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 * what should we do?
			 * Theoretically this can't happen
 			 * exit1() - clears threading flags before coming here
 			 * kse_exit() - treats last thread specially
 			 * thr_exit() - treats last thread specially
			 * ifdef KSE
 			 * thread_user_enter() - only if more exist
 			 * thread_userret() - only if more exist
			 * endif
 			 * thread_suspend_check() - only if more exist
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Aggregate our tick statistics into our parents rux. */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	/* sched_throw() switches away and never returns. */
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
#ifdef KSE
		if (td->td_standin != NULL) {
			if (td->td_standin->td_ucred != NULL) {
				crfree(td->td_standin->td_ucred);
				td->td_standin->td_ucred = NULL;
			}
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
#endif
		cpu_thread_clean(td);
		crfree(td->td_ucred);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *  proc_linkup()
 *  thread_schedule_upcall()
 *  thr_create()
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * it's spinlock has been created.
	 * PROC_SLOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = 0;

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *  thread_single(exit)  (called from execve and exit)
 *  kse_exit() XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
	upcall_remove(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
	sched_set_concurrency(p, 1);
#else
	p->p_flag &= ~P_HADTHREADS;
#endif
}

/*
 * Remove a thread from its process' thread list.
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * There are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible. (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	/*
	 * "remaining" counts the threads that still stand in our way for
	 * the requested mode; we are done when only we remain.
	 */
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			/* Make the thread notice us on its way out. */
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					/*
					 * NOTE(review): this clears DBSUSPEND
					 * on the *calling* thread (td), not
					 * td2 — verify this is intentional.
					 */
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, ERESTART);
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost  to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |  immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |  immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				/* We are the last; wake the single threader. */
				thread_lock(p->p_singlethread);
				thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
			}
		}
		/*
		 * Drop the proc lock before taking our own thread lock,
		 * then suspend ourselves.
		 */
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}

/*
 * Suspend the calling thread and switch away.
 * Entered with the proc lock and proc spinlock held; both are
 * re-acquired before returning.
 */
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	TD_SET_SUSPENDED(td);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

/*
 * Mark a (non-running) thread suspended.
 * Caller holds the proc spinlock and the thread's lock.
 */
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
}

/*
 * Clear a thread's suspended state and make it runnable again.
 * Caller holds the proc spinlock and the thread's lock.
 */
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		/* No stop request pending: release every suspended thread. */
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
}

/*
 * End the single threading mode..
 *
 * Called by the single-threading thread (curthread) with the proc lock
 * held; clears the single-threading flags and wakes the other threads
 * unless a blanket stop is in effect.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
}

/*
 * Look up a thread in process p by its lwpid.
 *
 * The caller must hold the proc lock; the thread list is walked under
 * the proc spinlock.  Returns the matching thread, or NULL when the
 * loop runs off the end of the list without a match (FOREACH_THREAD_
 * IN_PROC leaves td NULL in that case).  NOTE(review): the returned
 * pointer is only stable for as long as the caller keeps the proc
 * locked — presumably all callers do; verify at call sites.
 */
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	PROC_SUNLOCK(p);
	return (td);
}