kern_thread.c revision 258622
1139804Simp/*- 299026Sjulian * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>. 399026Sjulian * All rights reserved. 499026Sjulian * 599026Sjulian * Redistribution and use in source and binary forms, with or without 699026Sjulian * modification, are permitted provided that the following conditions 799026Sjulian * are met: 899026Sjulian * 1. Redistributions of source code must retain the above copyright 999026Sjulian * notice(s), this list of conditions and the following disclaimer as 10124350Sschweikh * the first lines of this file unmodified other than the possible 1199026Sjulian * addition of one or more copyright notices. 1299026Sjulian * 2. Redistributions in binary form must reproduce the above copyright 1399026Sjulian * notice(s), this list of conditions and the following disclaimer in the 1499026Sjulian * documentation and/or other materials provided with the distribution. 1599026Sjulian * 1699026Sjulian * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 1799026Sjulian * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 1899026Sjulian * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 1999026Sjulian * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 2099026Sjulian * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 2199026Sjulian * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 2299026Sjulian * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 2399026Sjulian * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2499026Sjulian * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2599026Sjulian * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 2699026Sjulian * DAMAGE. 
2799026Sjulian */ 2899026Sjulian 29181695Sattilio#include "opt_witness.h" 30198464Sjkoshy#include "opt_hwpmc_hooks.h" 31181695Sattilio 32116182Sobrien#include <sys/cdefs.h> 33116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 258622 2013-11-26 08:46:27Z avg $"); 34116182Sobrien 3599026Sjulian#include <sys/param.h> 3699026Sjulian#include <sys/systm.h> 3799026Sjulian#include <sys/kernel.h> 3899026Sjulian#include <sys/lock.h> 3999026Sjulian#include <sys/mutex.h> 4099026Sjulian#include <sys/proc.h> 41236317Skib#include <sys/rangelock.h> 42156705Sdavidxu#include <sys/resourcevar.h> 43235459Srstone#include <sys/sdt.h> 44130355Sjulian#include <sys/smp.h> 45107126Sjeff#include <sys/sched.h> 46126326Sjhb#include <sys/sleepqueue.h> 47174647Sjeff#include <sys/selinfo.h> 48122514Sjhb#include <sys/turnstile.h> 4999026Sjulian#include <sys/ktr.h> 50213642Sdavidxu#include <sys/rwlock.h> 51143149Sdavidxu#include <sys/umtx.h> 52176730Sjeff#include <sys/cpuset.h> 53198464Sjkoshy#ifdef HWPMC_HOOKS 54198464Sjkoshy#include <sys/pmckern.h> 55198464Sjkoshy#endif 5699026Sjulian 57155195Srwatson#include <security/audit/audit.h> 58155195Srwatson 5999026Sjulian#include <vm/vm.h> 60116355Salc#include <vm/vm_extern.h> 6199026Sjulian#include <vm/uma.h> 62173631Srrs#include <sys/eventhandler.h> 6399026Sjulian 64235459SrstoneSDT_PROVIDER_DECLARE(proc); 65258622SavgSDT_PROBE_DEFINE(proc, , , lwp__exit); 66235459Srstone 67235459Srstone 6899026Sjulian/* 69163709Sjb * thread related storage. 
70163709Sjb */ 7199026Sjulianstatic uma_zone_t thread_zone; 7299026Sjulian 73111028SjeffTAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); 74172256Sattiliostatic struct mtx zombie_lock; 75170296SjeffMTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN); 7699026Sjulian 77170598Sjeffstatic void thread_zombie(struct thread *); 78170598Sjeff 79216314Sdavidxu#define TID_BUFFER_SIZE 1024 80216314Sdavidxu 81127794Smarcelstruct mtx tid_lock; 82143802Sphkstatic struct unrhdr *tid_unrhdr; 83216314Sdavidxustatic lwpid_t tid_buffer[TID_BUFFER_SIZE]; 84216314Sdavidxustatic int tid_head, tid_tail; 85213642Sdavidxustatic MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash"); 86213642Sdavidxu 87213642Sdavidxustruct tidhashhead *tidhashtbl; 88213642Sdavidxuu_long tidhash; 89213642Sdavidxustruct rwlock tidhash_lock; 90213642Sdavidxu 91216314Sdavidxustatic lwpid_t 92216314Sdavidxutid_alloc(void) 93216314Sdavidxu{ 94216314Sdavidxu lwpid_t tid; 95216314Sdavidxu 96216314Sdavidxu tid = alloc_unr(tid_unrhdr); 97216314Sdavidxu if (tid != -1) 98216314Sdavidxu return (tid); 99216314Sdavidxu mtx_lock(&tid_lock); 100216314Sdavidxu if (tid_head == tid_tail) { 101216314Sdavidxu mtx_unlock(&tid_lock); 102216314Sdavidxu return (-1); 103216314Sdavidxu } 104240951Skib tid = tid_buffer[tid_head]; 105240951Skib tid_head = (tid_head + 1) % TID_BUFFER_SIZE; 106216314Sdavidxu mtx_unlock(&tid_lock); 107216314Sdavidxu return (tid); 108216314Sdavidxu} 109216314Sdavidxu 110216314Sdavidxustatic void 111216314Sdavidxutid_free(lwpid_t tid) 112216314Sdavidxu{ 113216314Sdavidxu lwpid_t tmp_tid = -1; 114216314Sdavidxu 115216314Sdavidxu mtx_lock(&tid_lock); 116216314Sdavidxu if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) { 117240951Skib tmp_tid = tid_buffer[tid_head]; 118240951Skib tid_head = (tid_head + 1) % TID_BUFFER_SIZE; 119216314Sdavidxu } 120240951Skib tid_buffer[tid_tail] = tid; 121240951Skib tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE; 122216314Sdavidxu 
mtx_unlock(&tid_lock); 123216314Sdavidxu if (tmp_tid != -1) 124216314Sdavidxu free_unr(tid_unrhdr, tmp_tid); 125216314Sdavidxu} 126216314Sdavidxu 127127794Smarcel/* 128107719Sjulian * Prepare a thread for use. 12999026Sjulian */ 130132987Sgreenstatic int 131132987Sgreenthread_ctor(void *mem, int size, void *arg, int flags) 13299026Sjulian{ 13399026Sjulian struct thread *td; 13499026Sjulian 13599026Sjulian td = (struct thread *)mem; 136103216Sjulian td->td_state = TDS_INACTIVE; 137135573Sjhb td->td_oncpu = NOCPU; 138130269Sjmallett 139216314Sdavidxu td->td_tid = tid_alloc(); 140143840Sphk 141130269Sjmallett /* 142130269Sjmallett * Note that td_critnest begins life as 1 because the thread is not 143130269Sjmallett * running and is thereby implicitly waiting to be on the receiving 144170296Sjeff * end of a context switch. 145130269Sjmallett */ 146118442Sjhb td->td_critnest = 1; 147216313Sdavidxu td->td_lend_user_pri = PRI_MAX; 148173631Srrs EVENTHANDLER_INVOKE(thread_ctor, td); 149155195Srwatson#ifdef AUDIT 150155195Srwatson audit_thread_alloc(td); 151155195Srwatson#endif 152161678Sdavidxu umtx_thread_alloc(td); 153132987Sgreen return (0); 15499026Sjulian} 15599026Sjulian 15699026Sjulian/* 15799026Sjulian * Reclaim a thread after use. 15899026Sjulian */ 15999026Sjulianstatic void 16099026Sjulianthread_dtor(void *mem, int size, void *arg) 16199026Sjulian{ 162127794Smarcel struct thread *td; 16399026Sjulian 16499026Sjulian td = (struct thread *)mem; 16599026Sjulian 16699026Sjulian#ifdef INVARIANTS 16799026Sjulian /* Verify that this thread is in a safe state to free. */ 16899026Sjulian switch (td->td_state) { 169103216Sjulian case TDS_INHIBITED: 170103216Sjulian case TDS_RUNNING: 171103216Sjulian case TDS_CAN_RUN: 17299026Sjulian case TDS_RUNQ: 17399026Sjulian /* 17499026Sjulian * We must never unlink a thread that is in one of 17599026Sjulian * these states, because it is currently active. 
17699026Sjulian */ 17799026Sjulian panic("bad state for thread unlinking"); 17899026Sjulian /* NOTREACHED */ 179103216Sjulian case TDS_INACTIVE: 18099026Sjulian break; 18199026Sjulian default: 18299026Sjulian panic("bad thread state"); 18399026Sjulian /* NOTREACHED */ 18499026Sjulian } 18599026Sjulian#endif 186155353Srwatson#ifdef AUDIT 187155353Srwatson audit_thread_free(td); 188155353Srwatson#endif 189185029Spjd /* Free all OSD associated to this thread. */ 190185029Spjd osd_thread_exit(td); 191185029Spjd 192173631Srrs EVENTHANDLER_INVOKE(thread_dtor, td); 193216314Sdavidxu tid_free(td->td_tid); 19499026Sjulian} 19599026Sjulian 19699026Sjulian/* 19799026Sjulian * Initialize type-stable parts of a thread (when newly created). 19899026Sjulian */ 199132987Sgreenstatic int 200132987Sgreenthread_init(void *mem, int size, int flags) 20199026Sjulian{ 202131149Smarcel struct thread *td; 20399026Sjulian 20499026Sjulian td = (struct thread *)mem; 205131149Smarcel 206126326Sjhb td->td_sleepqueue = sleepq_alloc(); 207122514Sjhb td->td_turnstile = turnstile_alloc(); 208236317Skib td->td_rlqe = NULL; 209173631Srrs EVENTHANDLER_INVOKE(thread_init, td); 210107126Sjeff td->td_sched = (struct td_sched *)&td[1]; 211161678Sdavidxu umtx_thread_init(td); 212173361Skib td->td_kstack = 0; 213132987Sgreen return (0); 21499026Sjulian} 21599026Sjulian 21699026Sjulian/* 21799026Sjulian * Tear down type-stable parts of a thread (just before being discarded). 
21899026Sjulian */ 21999026Sjulianstatic void 22099026Sjulianthread_fini(void *mem, int size) 22199026Sjulian{ 222131149Smarcel struct thread *td; 22399026Sjulian 22499026Sjulian td = (struct thread *)mem; 225173631Srrs EVENTHANDLER_INVOKE(thread_fini, td); 226236317Skib rlqentry_free(td->td_rlqe); 227122514Sjhb turnstile_free(td->td_turnstile); 228126326Sjhb sleepq_free(td->td_sleepqueue); 229161678Sdavidxu umtx_thread_fini(td); 230174647Sjeff seltdfini(td); 23199026Sjulian} 232111028Sjeff 233107126Sjeff/* 234111028Sjeff * For a newly created process, 235111028Sjeff * link up all the structures and its initial threads etc. 236134791Sjulian * called from: 237134791Sjulian * {arch}/{arch}/machdep.c ia64_init(), init386() etc. 238134791Sjulian * proc_dtor() (should go away) 239134791Sjulian * proc_init() 240105854Sjulian */ 241105854Sjulianvoid 242173361Skibproc_linkup0(struct proc *p, struct thread *td) 243173361Skib{ 244173361Skib TAILQ_INIT(&p->p_threads); /* all threads in proc */ 245173361Skib proc_linkup(p, td); 246173361Skib} 247173361Skib 248173361Skibvoid 249163709Sjbproc_linkup(struct proc *p, struct thread *td) 250105854Sjulian{ 251170296Sjeff 252151316Sdavidxu sigqueue_init(&p->p_sigqueue, p); 253153253Sdavidxu p->p_ksi = ksiginfo_alloc(1); 254153253Sdavidxu if (p->p_ksi != NULL) { 255153253Sdavidxu /* XXX p_ksi may be null if ksiginfo zone is not ready */ 256153253Sdavidxu p->p_ksi->ksi_flags = KSI_EXT | KSI_INS; 257152185Sdavidxu } 258152948Sdavidxu LIST_INIT(&p->p_mqnotifier); 259105854Sjulian p->p_numthreads = 0; 260163709Sjb thread_link(td, p); 261105854Sjulian} 262105854Sjulian 263111028Sjeff/* 26499026Sjulian * Initialize global thread allocation resources. 26599026Sjulian */ 26699026Sjulianvoid 26799026Sjulianthreadinit(void) 26899026Sjulian{ 26999026Sjulian 270143802Sphk mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF); 271239301Skib 272239301Skib /* 273239328Skib * pid_max cannot be greater than PID_MAX. 
274239301Skib * leave one number for thread0. 275239301Skib */ 276174848Sjulian tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock); 277143802Sphk 278107126Sjeff thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), 27999026Sjulian thread_ctor, thread_dtor, thread_init, thread_fini, 280167944Sjhb 16 - 1, 0); 281213642Sdavidxu tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash); 282213642Sdavidxu rw_init(&tidhash_lock, "tidhash"); 28399026Sjulian} 28499026Sjulian 28599026Sjulian/* 286170598Sjeff * Place an unused thread on the zombie list. 287164936Sjulian * Use the slpq as that must be unused by now. 28899026Sjulian */ 28999026Sjulianvoid 290170598Sjeffthread_zombie(struct thread *td) 29199026Sjulian{ 292170296Sjeff mtx_lock_spin(&zombie_lock); 293164936Sjulian TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq); 294170296Sjeff mtx_unlock_spin(&zombie_lock); 29599026Sjulian} 29699026Sjulian 297103410Smini/* 298170598Sjeff * Release a thread that has exited after cpu_throw(). 299170598Sjeff */ 300170598Sjeffvoid 301170598Sjeffthread_stash(struct thread *td) 302170598Sjeff{ 303170598Sjeff atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); 304170598Sjeff thread_zombie(td); 305170598Sjeff} 306170598Sjeff 307170598Sjeff/* 308177091Sjeff * Reap zombie resources. 30999026Sjulian */ 31099026Sjulianvoid 31199026Sjulianthread_reap(void) 31299026Sjulian{ 313105854Sjulian struct thread *td_first, *td_next; 31499026Sjulian 31599026Sjulian /* 316111028Sjeff * Don't even bother to lock if none at this instant, 317111028Sjeff * we really don't care about the next instant.. 
31899026Sjulian */ 319163709Sjb if (!TAILQ_EMPTY(&zombie_threads)) { 320170296Sjeff mtx_lock_spin(&zombie_lock); 321105854Sjulian td_first = TAILQ_FIRST(&zombie_threads); 322105854Sjulian if (td_first) 323105854Sjulian TAILQ_INIT(&zombie_threads); 324170296Sjeff mtx_unlock_spin(&zombie_lock); 325105854Sjulian while (td_first) { 326164936Sjulian td_next = TAILQ_NEXT(td_first, td_slpq); 327111028Sjeff if (td_first->td_ucred) 328111028Sjeff crfree(td_first->td_ucred); 329105854Sjulian thread_free(td_first); 330105854Sjulian td_first = td_next; 33199026Sjulian } 33299026Sjulian } 33399026Sjulian} 33499026Sjulian 33599026Sjulian/* 33699026Sjulian * Allocate a thread. 33799026Sjulian */ 33899026Sjulianstruct thread * 339196730Skibthread_alloc(int pages) 34099026Sjulian{ 341173361Skib struct thread *td; 342163709Sjb 34399026Sjulian thread_reap(); /* check if any zombies to get */ 344173361Skib 345173361Skib td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); 346173361Skib KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); 347196730Skib if (!vm_thread_new(td, pages)) { 348173361Skib uma_zfree(thread_zone, td); 349173361Skib return (NULL); 350173361Skib } 351173615Smarcel cpu_thread_alloc(td); 352173361Skib return (td); 35399026Sjulian} 35499026Sjulian 355196730Skibint 356196730Skibthread_alloc_stack(struct thread *td, int pages) 357196730Skib{ 358103367Sjulian 359196730Skib KASSERT(td->td_kstack == 0, 360196730Skib ("thread_alloc_stack called on a thread with kstack")); 361196730Skib if (!vm_thread_new(td, pages)) 362196730Skib return (0); 363196730Skib cpu_thread_alloc(td); 364196730Skib return (1); 365196730Skib} 366196730Skib 367103367Sjulian/* 36899026Sjulian * Deallocate a thread. 
36999026Sjulian */ 37099026Sjulianvoid 37199026Sjulianthread_free(struct thread *td) 37299026Sjulian{ 373189845Sjeff 374189845Sjeff lock_profile_thread_exit(td); 375177369Sjeff if (td->td_cpuset) 376177369Sjeff cpuset_rel(td->td_cpuset); 377176730Sjeff td->td_cpuset = NULL; 378173615Smarcel cpu_thread_free(td); 379173361Skib if (td->td_kstack != 0) 380173361Skib vm_thread_dispose(td); 38199026Sjulian uma_zfree(thread_zone, td); 38299026Sjulian} 38399026Sjulian 38499026Sjulian/* 38599026Sjulian * Discard the current thread and exit from its context. 386130355Sjulian * Always called with scheduler locked. 38799026Sjulian * 38899026Sjulian * Because we can't free a thread while we're operating under its context, 389107719Sjulian * push the current thread into our CPU's deadthread holder. This means 390107719Sjulian * we needn't worry about someone else grabbing our context before we 391177091Sjeff * do a cpu_throw(). 39299026Sjulian */ 39399026Sjulianvoid 39499026Sjulianthread_exit(void) 39599026Sjulian{ 396229429Sjhb uint64_t runtime, new_switchtime; 39799026Sjulian struct thread *td; 398170174Sjeff struct thread *td2; 39999026Sjulian struct proc *p; 400182011Sjhb int wakeup_swapper; 40199026Sjulian 40299026Sjulian td = curthread; 40399026Sjulian p = td->td_proc; 40499026Sjulian 405170296Sjeff PROC_SLOCK_ASSERT(p, MA_OWNED); 406134791Sjulian mtx_assert(&Giant, MA_NOTOWNED); 407170296Sjeff 408134791Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 409102581Sjulian KASSERT(p != NULL, ("thread exiting without a process")); 410133234Srwatson CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td, 411173601Sjulian (long)p->p_pid, td->td_name); 412151316Sdavidxu KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending")); 41399026Sjulian 414155376Srwatson#ifdef AUDIT 415155376Srwatson AUDIT_SYSCALL_EXIT(0, td); 416155376Srwatson#endif 417161678Sdavidxu umtx_thread_exit(td); 418134791Sjulian /* 419134791Sjulian * drop FPU & debug register state storage, or any other 
420134791Sjulian * architecture specific resources that 421134791Sjulian * would not be on a new untouched process. 422134791Sjulian */ 42399026Sjulian cpu_thread_exit(td); /* XXXSMP */ 42499026Sjulian 425134791Sjulian /* 426103002Sjulian * The last thread is left attached to the process 427103002Sjulian * So that the whole bundle gets recycled. Skip 428134791Sjulian * all this stuff if we never had threads. 429134791Sjulian * EXIT clears all sign of other threads when 430134791Sjulian * it goes to single threading, so the last thread always 431134791Sjulian * takes the short path. 432102581Sjulian */ 433134791Sjulian if (p->p_flag & P_HADTHREADS) { 434134791Sjulian if (p->p_numthreads > 1) { 435134791Sjulian thread_unlink(td); 436170174Sjeff td2 = FIRST_THREAD_IN_PROC(p); 437170174Sjeff sched_exit_thread(td2, td); 438134791Sjulian 439134791Sjulian /* 440134791Sjulian * The test below is NOT true if we are the 441207606Skib * sole exiting thread. P_STOPPED_SINGLE is unset 442134791Sjulian * in exit1() after it is the only survivor. 443134791Sjulian */ 444134791Sjulian if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 445134791Sjulian if (p->p_numthreads == p->p_suspcount) { 446170296Sjeff thread_lock(p->p_singlethread); 447182011Sjhb wakeup_swapper = thread_unsuspend_one( 448182011Sjhb p->p_singlethread); 449170296Sjeff thread_unlock(p->p_singlethread); 450182011Sjhb if (wakeup_swapper) 451182011Sjhb kick_proc0(); 452134791Sjulian } 453103002Sjulian } 454104695Sjulian 455170598Sjeff atomic_add_int(&td->td_proc->p_exitthreads, 1); 456134791Sjulian PCPU_SET(deadthread, td); 457134791Sjulian } else { 458134791Sjulian /* 459134791Sjulian * The last thread is exiting.. 
but not through exit() 460134791Sjulian */ 461134791Sjulian panic ("thread_exit: Last thread exiting on its own"); 462119488Sdavidxu } 463170296Sjeff } 464198464Sjkoshy#ifdef HWPMC_HOOKS 465198464Sjkoshy /* 466198464Sjkoshy * If this thread is part of a process that is being tracked by hwpmc(4), 467198464Sjkoshy * inform the module of the thread's impending exit. 468198464Sjkoshy */ 469198464Sjkoshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 470198464Sjkoshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 471198464Sjkoshy#endif 472170296Sjeff PROC_UNLOCK(p); 473229429Sjhb 474229429Sjhb /* Do the same timestamp bookkeeping that mi_switch() would do. */ 475229429Sjhb new_switchtime = cpu_ticks(); 476229429Sjhb runtime = new_switchtime - PCPU_GET(switchtime); 477229429Sjhb td->td_runtime += runtime; 478229429Sjhb td->td_incruntime += runtime; 479229429Sjhb PCPU_SET(switchtime, new_switchtime); 480229429Sjhb PCPU_SET(switchticks, ticks); 481229429Sjhb PCPU_INC(cnt.v_swtch); 482229429Sjhb 483229429Sjhb /* Save our resource usage in our process. */ 484229429Sjhb td->td_ru.ru_nvcsw++; 485208488Skib ruxagg(p, td); 486229429Sjhb rucollect(&p->p_ru, &td->td_ru); 487229429Sjhb 488170296Sjeff thread_lock(td); 489170296Sjeff PROC_SUNLOCK(p); 490133396Sjulian td->td_state = TDS_INACTIVE; 491181695Sattilio#ifdef WITNESS 492181695Sattilio witness_thread_exit(td); 493181695Sattilio#endif 494133396Sjulian CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td); 495170296Sjeff sched_throw(td); 496112993Speter panic("I'm a teapot!"); 49799026Sjulian /* NOTREACHED */ 49899026Sjulian} 49999026Sjulian 500124350Sschweikh/* 501107719Sjulian * Do any thread specific cleanups that may be needed in wait() 502126932Speter * called with Giant, proc and schedlock not held. 
503107719Sjulian */ 504107719Sjulianvoid 505107719Sjulianthread_wait(struct proc *p) 506107719Sjulian{ 507107719Sjulian struct thread *td; 508107719Sjulian 509126932Speter mtx_assert(&Giant, MA_NOTOWNED); 510124350Sschweikh KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()")); 511170598Sjeff td = FIRST_THREAD_IN_PROC(p); 512170598Sjeff /* Lock the last thread so we spin until it exits cpu_throw(). */ 513170598Sjeff thread_lock(td); 514170598Sjeff thread_unlock(td); 515170598Sjeff /* Wait for any remaining threads to exit cpu_throw(). */ 516170598Sjeff while (p->p_exitthreads) 517170598Sjeff sched_relinquish(curthread); 518189845Sjeff lock_profile_thread_exit(td); 519176730Sjeff cpuset_rel(td->td_cpuset); 520176730Sjeff td->td_cpuset = NULL; 521170598Sjeff cpu_thread_clean(td); 522170598Sjeff crfree(td->td_ucred); 523107719Sjulian thread_reap(); /* check for zombie threads etc. */ 524107719Sjulian} 525107719Sjulian 52699026Sjulian/* 52799026Sjulian * Link a thread to a process. 528103002Sjulian * set up anything that needs to be initialized for it to 529103002Sjulian * be used by the process. 53099026Sjulian */ 53199026Sjulianvoid 532163709Sjbthread_link(struct thread *td, struct proc *p) 53399026Sjulian{ 53499026Sjulian 535170296Sjeff /* 536170296Sjeff * XXX This can't be enabled because it's called for proc0 before 537177368Sjeff * its lock has been created. 
538177368Sjeff * PROC_LOCK_ASSERT(p, MA_OWNED); 539170296Sjeff */ 540111028Sjeff td->td_state = TDS_INACTIVE; 541111028Sjeff td->td_proc = p; 542172207Sjeff td->td_flags = TDF_INMEM; 54399026Sjulian 544103002Sjulian LIST_INIT(&td->td_contested); 545174629Sjeff LIST_INIT(&td->td_lprof[0]); 546174629Sjeff LIST_INIT(&td->td_lprof[1]); 547151316Sdavidxu sigqueue_init(&td->td_sigqueue, p); 548119137Ssam callout_init(&td->td_slpcallout, CALLOUT_MPSAFE); 54999026Sjulian TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist); 55099026Sjulian p->p_numthreads++; 55199026Sjulian} 55299026Sjulian 553134791Sjulian/* 554136160Sjulian * Convert a process with one thread to an unthreaded process. 555136160Sjulian */ 556136160Sjulianvoid 557136160Sjulianthread_unthread(struct thread *td) 558136160Sjulian{ 559136160Sjulian struct proc *p = td->td_proc; 560136160Sjulian 561136160Sjulian KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads")); 562163709Sjb p->p_flag &= ~P_HADTHREADS; 563136160Sjulian} 564136160Sjulian 565136160Sjulian/* 566136160Sjulian * Called from: 567134791Sjulian * thread_exit() 568134791Sjulian */ 569113641Sjulianvoid 570113641Sjulianthread_unlink(struct thread *td) 571124350Sschweikh{ 572113641Sjulian struct proc *p = td->td_proc; 573113920Sjhb 574177368Sjeff PROC_LOCK_ASSERT(p, MA_OWNED); 575113641Sjulian TAILQ_REMOVE(&p->p_threads, td, td_plist); 576113641Sjulian p->p_numthreads--; 577113641Sjulian /* could clear a few other things here */ 578163709Sjb /* Must NOT clear links to proc! 
*/ 579124350Sschweikh} 580113641Sjulian 581195701Skibstatic int 582195701Skibcalc_remaining(struct proc *p, int mode) 583195701Skib{ 584195701Skib int remaining; 585195701Skib 586227657Skib PROC_LOCK_ASSERT(p, MA_OWNED); 587227657Skib PROC_SLOCK_ASSERT(p, MA_OWNED); 588195701Skib if (mode == SINGLE_EXIT) 589195701Skib remaining = p->p_numthreads; 590195701Skib else if (mode == SINGLE_BOUNDARY) 591195701Skib remaining = p->p_numthreads - p->p_boundary_count; 592195701Skib else if (mode == SINGLE_NO_EXIT) 593195701Skib remaining = p->p_numthreads - p->p_suspcount; 594195701Skib else 595195701Skib panic("calc_remaining: wrong mode %d", mode); 596195701Skib return (remaining); 597195701Skib} 598195701Skib 599111028Sjeff/* 60099026Sjulian * Enforce single-threading. 60199026Sjulian * 60299026Sjulian * Returns 1 if the caller must abort (another thread is waiting to 60399026Sjulian * exit the process or similar). Process is locked! 60499026Sjulian * Returns 0 when you are successfully the only thread running. 60599026Sjulian * A process has successfully single threaded in the suspend mode when 60699026Sjulian * There are no threads in user mode. Threads in the kernel must be 60799026Sjulian * allowed to continue until they get to the user boundary. They may even 60899026Sjulian * copy out their return values and data before suspending. They may however be 609160048Smaxim * accelerated in reaching the user boundary as we will wake up 61099026Sjulian * any sleeping threads that are interruptable. (PCATCH). 
61199026Sjulian */ 61299026Sjulianint 613136177Sdavidxuthread_single(int mode) 61499026Sjulian{ 61599026Sjulian struct thread *td; 61699026Sjulian struct thread *td2; 61799026Sjulian struct proc *p; 618181334Sjhb int remaining, wakeup_swapper; 61999026Sjulian 62099026Sjulian td = curthread; 62199026Sjulian p = td->td_proc; 622126932Speter mtx_assert(&Giant, MA_NOTOWNED); 62399026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 62499026Sjulian 625134791Sjulian if ((p->p_flag & P_HADTHREADS) == 0) 62699026Sjulian return (0); 62799026Sjulian 628100648Sjulian /* Is someone already single threading? */ 629136177Sdavidxu if (p->p_singlethread != NULL && p->p_singlethread != td) 63099026Sjulian return (1); 63199026Sjulian 632136177Sdavidxu if (mode == SINGLE_EXIT) { 633136177Sdavidxu p->p_flag |= P_SINGLE_EXIT; 634136177Sdavidxu p->p_flag &= ~P_SINGLE_BOUNDARY; 635136177Sdavidxu } else { 636136177Sdavidxu p->p_flag &= ~P_SINGLE_EXIT; 637136177Sdavidxu if (mode == SINGLE_BOUNDARY) 638136177Sdavidxu p->p_flag |= P_SINGLE_BOUNDARY; 639136177Sdavidxu else 640136177Sdavidxu p->p_flag &= ~P_SINGLE_BOUNDARY; 641136177Sdavidxu } 642102950Sdavidxu p->p_flag |= P_STOPPED_SINGLE; 643184667Sdavidxu PROC_SLOCK(p); 64499026Sjulian p->p_singlethread = td; 645195701Skib remaining = calc_remaining(p, mode); 646130674Sdavidxu while (remaining != 1) { 647156942Sdavidxu if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) 648156942Sdavidxu goto stopme; 649181334Sjhb wakeup_swapper = 0; 65099026Sjulian FOREACH_THREAD_IN_PROC(p, td2) { 65199026Sjulian if (td2 == td) 65299026Sjulian continue; 653170296Sjeff thread_lock(td2); 654177471Sjeff td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK; 655103216Sjulian if (TD_IS_INHIBITED(td2)) { 656136177Sdavidxu switch (mode) { 657136177Sdavidxu case SINGLE_EXIT: 658136177Sdavidxu if (TD_IS_SUSPENDED(td2)) 659182011Sjhb wakeup_swapper |= 660182011Sjhb thread_unsuspend_one(td2); 661105911Sjulian if (TD_ON_SLEEPQ(td2) && 662136177Sdavidxu (td2->td_flags & TDF_SINTR)) 
663182011Sjhb wakeup_swapper |= 664181334Sjhb sleepq_abort(td2, EINTR); 665136177Sdavidxu break; 666136177Sdavidxu case SINGLE_BOUNDARY: 667183929Sdavidxu if (TD_IS_SUSPENDED(td2) && 668183929Sdavidxu !(td2->td_flags & TDF_BOUNDARY)) 669183929Sdavidxu wakeup_swapper |= 670183929Sdavidxu thread_unsuspend_one(td2); 671183929Sdavidxu if (TD_ON_SLEEPQ(td2) && 672183929Sdavidxu (td2->td_flags & TDF_SINTR)) 673183929Sdavidxu wakeup_swapper |= 674183929Sdavidxu sleepq_abort(td2, ERESTART); 675136177Sdavidxu break; 676195702Skib case SINGLE_NO_EXIT: 677195702Skib if (TD_IS_SUSPENDED(td2) && 678195702Skib !(td2->td_flags & TDF_BOUNDARY)) 679195702Skib wakeup_swapper |= 680195702Skib thread_unsuspend_one(td2); 681195702Skib if (TD_ON_SLEEPQ(td2) && 682195702Skib (td2->td_flags & TDF_SINTR)) 683195702Skib wakeup_swapper |= 684195702Skib sleepq_abort(td2, ERESTART); 685195702Skib break; 686183929Sdavidxu default: 687136177Sdavidxu break; 68899026Sjulian } 68999026Sjulian } 690155594Sdavidxu#ifdef SMP 691155594Sdavidxu else if (TD_IS_RUNNING(td2) && td != td2) { 692155594Sdavidxu forward_signal(td2); 693155594Sdavidxu } 694155594Sdavidxu#endif 695170296Sjeff thread_unlock(td2); 69699026Sjulian } 697181334Sjhb if (wakeup_swapper) 698181334Sjhb kick_proc0(); 699195701Skib remaining = calc_remaining(p, mode); 700130674Sdavidxu 701124350Sschweikh /* 702124350Sschweikh * Maybe we suspended some threads.. was it enough? 703105911Sjulian */ 704130674Sdavidxu if (remaining == 1) 705105911Sjulian break; 706105911Sjulian 707156942Sdavidxustopme: 70899026Sjulian /* 70999026Sjulian * Wake us up when everyone else has suspended. 710100648Sjulian * In the mean time we suspend as well. 
71199026Sjulian */ 712170296Sjeff thread_suspend_switch(td); 713195701Skib remaining = calc_remaining(p, mode); 71499026Sjulian } 715136177Sdavidxu if (mode == SINGLE_EXIT) { 716135269Sjulian /* 717135269Sjulian * We have gotten rid of all the other threads and we 718135269Sjulian * are about to either exit or exec. In either case, 719240204Sjhb * we try our utmost to revert to being a non-threaded 720135269Sjulian * process. 721135269Sjulian */ 722136160Sjulian p->p_singlethread = NULL; 723137279Sdavidxu p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT); 724136160Sjulian thread_unthread(td); 725111028Sjeff } 726184667Sdavidxu PROC_SUNLOCK(p); 72799026Sjulian return (0); 72899026Sjulian} 72999026Sjulian 73099026Sjulian/* 73199026Sjulian * Called in from locations that can safely check to see 73299026Sjulian * whether we have to suspend or at least throttle for a 73399026Sjulian * single-thread event (e.g. fork). 73499026Sjulian * 73599026Sjulian * Such locations include userret(). 73699026Sjulian * If the "return_instead" argument is non zero, the thread must be able to 73799026Sjulian * accept 0 (caller may continue), or 1 (caller must abort) as a result. 73899026Sjulian * 73999026Sjulian * The 'return_instead' argument tells the function if it may do a 74099026Sjulian * thread_exit() or suspend, or whether the caller must abort and back 74199026Sjulian * out instead. 74299026Sjulian * 74399026Sjulian * If the thread that set the single_threading request has set the 74499026Sjulian * P_SINGLE_EXIT bit in the process flags then this call will never return 74599026Sjulian * if 'return_instead' is false, but will exit. 
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	int wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Loop: each pass either returns, exits the thread, or suspends
	 * and re-checks the stop condition after being resumed.
	 */
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/*
		 * Ignore suspend requests for stop signals if they
		 * are deferred.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SIG &&
		    td->td_flags & TDF_SBDRY) {
			KASSERT(return_instead,
			    ("TDF_SBDRY set for unsafe thread_suspend_check"));
			return (0);
		}

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			/*
			 * Drop the proc lock around tidhash_remove(); it
			 * takes tidhash_lock, a different (sleepable rwlock)
			 * class.  thread_exit() never returns.
			 */
			PROC_UNLOCK(p);
			tidhash_remove(td);
			PROC_LOCK(p);
			tdsigcleanup(td);
			PROC_SLOCK(p);
			thread_stopped(p);
			thread_exit();
		}

		PROC_SLOCK(p);
		thread_stopped(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			/*
			 * We are the last thread the single-threader is
			 * waiting for (all others plus ourselves are
			 * suspended); wake it before we suspend ourselves.
			 */
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				wakeup_swapper =
				    thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
				/*
				 * setrunnable() asked for proc0 (the
				 * swapper) to be woken; do it now that the
				 * thread lock is dropped.
				 */
				if (wakeup_swapper)
					kick_proc0();
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			/* Count ourselves as stopped at the user boundary. */
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0) {
			PROC_SLOCK(p);
			p->p_boundary_count--;
			PROC_SUNLOCK(p);
		}
	}
	return (0);
}

/*
 * Suspend thread td and switch away from it.  Used by the
 * single-threading code to park the requesting thread itself.
 *
 * Called, and returns, with both the proc lock and the proc
 * spinlock held (asserted below); both are dropped across the
 * context switch and reacquired before returning.
 */
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

/*
 * Mark thread td suspended and account for it in p_suspcount.
 * Caller must hold the proc spinlock and td's thread lock
 * (asserted), and td must not already be suspended.
 */
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

/*
 * Undo thread_suspend_one() and make td runnable again.
 * Caller must hold the proc spinlock and td's thread lock
 * (asserted).  Returns setrunnable()'s result: nonzero when the
 * caller should wake proc0 (the swapper) via kick_proc0() once
 * the locks are dropped.
 */
int
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	return (setrunnable(td));
}

/*
 * Allow all threads blocked by single threading to continue running.
91899026Sjulian */ 91999026Sjulianvoid 92099026Sjulianthread_unsuspend(struct proc *p) 92199026Sjulian{ 92299026Sjulian struct thread *td; 923182011Sjhb int wakeup_swapper; 92499026Sjulian 92599026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 926184667Sdavidxu PROC_SLOCK_ASSERT(p, MA_OWNED); 927182011Sjhb wakeup_swapper = 0; 92899026Sjulian if (!P_SHOULDSTOP(p)) { 929164936Sjulian FOREACH_THREAD_IN_PROC(p, td) { 930170296Sjeff thread_lock(td); 931164936Sjulian if (TD_IS_SUSPENDED(td)) { 932182011Sjhb wakeup_swapper |= thread_unsuspend_one(td); 933164936Sjulian } 934170296Sjeff thread_unlock(td); 93599026Sjulian } 936102950Sdavidxu } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) && 93799026Sjulian (p->p_numthreads == p->p_suspcount)) { 93899026Sjulian /* 93999026Sjulian * Stopping everything also did the job for the single 94099026Sjulian * threading request. Now we've downgraded to single-threaded, 94199026Sjulian * let it continue. 94299026Sjulian */ 943170296Sjeff thread_lock(p->p_singlethread); 944182011Sjhb wakeup_swapper = thread_unsuspend_one(p->p_singlethread); 945170296Sjeff thread_unlock(p->p_singlethread); 94699026Sjulian } 947182011Sjhb if (wakeup_swapper) 948182011Sjhb kick_proc0(); 94999026Sjulian} 95099026Sjulian 951134791Sjulian/* 952134791Sjulian * End the single threading mode.. 
953134791Sjulian */ 95499026Sjulianvoid 95599026Sjulianthread_single_end(void) 95699026Sjulian{ 95799026Sjulian struct thread *td; 95899026Sjulian struct proc *p; 959182011Sjhb int wakeup_swapper; 96099026Sjulian 96199026Sjulian td = curthread; 96299026Sjulian p = td->td_proc; 96399026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 964136177Sdavidxu p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY); 965184667Sdavidxu PROC_SLOCK(p); 96699026Sjulian p->p_singlethread = NULL; 967182011Sjhb wakeup_swapper = 0; 968102292Sjulian /* 969182011Sjhb * If there are other threads they may now run, 970102292Sjulian * unless of course there is a blanket 'stop order' 971102292Sjulian * on the process. The single threader must be allowed 972102292Sjulian * to continue however as this is a bad place to stop. 973102292Sjulian */ 974102292Sjulian if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) { 975164936Sjulian FOREACH_THREAD_IN_PROC(p, td) { 976170296Sjeff thread_lock(td); 977164936Sjulian if (TD_IS_SUSPENDED(td)) { 978182011Sjhb wakeup_swapper |= thread_unsuspend_one(td); 979164936Sjulian } 980170296Sjeff thread_unlock(td); 981102292Sjulian } 982102292Sjulian } 983184667Sdavidxu PROC_SUNLOCK(p); 984182011Sjhb if (wakeup_swapper) 985182011Sjhb kick_proc0(); 98699026Sjulian} 987128721Sdeischen 988151990Sdavidxustruct thread * 989151990Sdavidxuthread_find(struct proc *p, lwpid_t tid) 990151990Sdavidxu{ 991151990Sdavidxu struct thread *td; 992151990Sdavidxu 993151990Sdavidxu PROC_LOCK_ASSERT(p, MA_OWNED); 994151990Sdavidxu FOREACH_THREAD_IN_PROC(p, td) { 995151990Sdavidxu if (td->td_tid == tid) 996151990Sdavidxu break; 997151990Sdavidxu } 998151990Sdavidxu return (td); 999151990Sdavidxu} 1000213642Sdavidxu 1001213642Sdavidxu/* Locate a thread by number; return with proc lock held. 
*/ 1002213642Sdavidxustruct thread * 1003213642Sdavidxutdfind(lwpid_t tid, pid_t pid) 1004213642Sdavidxu{ 1005213642Sdavidxu#define RUN_THRESH 16 1006213642Sdavidxu struct thread *td; 1007213642Sdavidxu int run = 0; 1008213642Sdavidxu 1009213642Sdavidxu rw_rlock(&tidhash_lock); 1010213642Sdavidxu LIST_FOREACH(td, TIDHASH(tid), td_hash) { 1011213642Sdavidxu if (td->td_tid == tid) { 1012213642Sdavidxu if (pid != -1 && td->td_proc->p_pid != pid) { 1013213642Sdavidxu td = NULL; 1014213642Sdavidxu break; 1015213642Sdavidxu } 1016219968Sjhb PROC_LOCK(td->td_proc); 1017213642Sdavidxu if (td->td_proc->p_state == PRS_NEW) { 1018219968Sjhb PROC_UNLOCK(td->td_proc); 1019213642Sdavidxu td = NULL; 1020213642Sdavidxu break; 1021213642Sdavidxu } 1022213642Sdavidxu if (run > RUN_THRESH) { 1023213642Sdavidxu if (rw_try_upgrade(&tidhash_lock)) { 1024213642Sdavidxu LIST_REMOVE(td, td_hash); 1025213642Sdavidxu LIST_INSERT_HEAD(TIDHASH(td->td_tid), 1026213642Sdavidxu td, td_hash); 1027213642Sdavidxu rw_wunlock(&tidhash_lock); 1028213642Sdavidxu return (td); 1029213642Sdavidxu } 1030213642Sdavidxu } 1031213642Sdavidxu break; 1032213642Sdavidxu } 1033213642Sdavidxu run++; 1034213642Sdavidxu } 1035213642Sdavidxu rw_runlock(&tidhash_lock); 1036213642Sdavidxu return (td); 1037213642Sdavidxu} 1038213642Sdavidxu 1039213642Sdavidxuvoid 1040213642Sdavidxutidhash_add(struct thread *td) 1041213642Sdavidxu{ 1042213642Sdavidxu rw_wlock(&tidhash_lock); 1043213950Sdavidxu LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); 1044213642Sdavidxu rw_wunlock(&tidhash_lock); 1045213642Sdavidxu} 1046213642Sdavidxu 1047213642Sdavidxuvoid 1048213642Sdavidxutidhash_remove(struct thread *td) 1049213642Sdavidxu{ 1050213642Sdavidxu rw_wlock(&tidhash_lock); 1051213950Sdavidxu LIST_REMOVE(td, td_hash); 1052213642Sdavidxu rw_wunlock(&tidhash_lock); 1053213642Sdavidxu} 1054