/* kern_thread.c — FreeBSD head, SVN revision 271000 */
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
2799026Sjulian */ 2899026Sjulian 29181695Sattilio#include "opt_witness.h" 30198464Sjkoshy#include "opt_hwpmc_hooks.h" 31181695Sattilio 32116182Sobrien#include <sys/cdefs.h> 33116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 271000 2014-09-03 08:18:07Z kib $"); 34116182Sobrien 3599026Sjulian#include <sys/param.h> 3699026Sjulian#include <sys/systm.h> 3799026Sjulian#include <sys/kernel.h> 3899026Sjulian#include <sys/lock.h> 3999026Sjulian#include <sys/mutex.h> 4099026Sjulian#include <sys/proc.h> 41236317Skib#include <sys/rangelock.h> 42156705Sdavidxu#include <sys/resourcevar.h> 43235459Srstone#include <sys/sdt.h> 44130355Sjulian#include <sys/smp.h> 45107126Sjeff#include <sys/sched.h> 46126326Sjhb#include <sys/sleepqueue.h> 47174647Sjeff#include <sys/selinfo.h> 48122514Sjhb#include <sys/turnstile.h> 4999026Sjulian#include <sys/ktr.h> 50213642Sdavidxu#include <sys/rwlock.h> 51143149Sdavidxu#include <sys/umtx.h> 52176730Sjeff#include <sys/cpuset.h> 53198464Sjkoshy#ifdef HWPMC_HOOKS 54198464Sjkoshy#include <sys/pmckern.h> 55198464Sjkoshy#endif 5699026Sjulian 57155195Srwatson#include <security/audit/audit.h> 58155195Srwatson 5999026Sjulian#include <vm/vm.h> 60116355Salc#include <vm/vm_extern.h> 6199026Sjulian#include <vm/uma.h> 62173631Srrs#include <sys/eventhandler.h> 6399026Sjulian 64235459SrstoneSDT_PROVIDER_DECLARE(proc); 65258622SavgSDT_PROBE_DEFINE(proc, , , lwp__exit); 66235459Srstone 67235459Srstone 6899026Sjulian/* 69163709Sjb * thread related storage. 
70163709Sjb */ 7199026Sjulianstatic uma_zone_t thread_zone; 7299026Sjulian 73111028SjeffTAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); 74172256Sattiliostatic struct mtx zombie_lock; 75170296SjeffMTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN); 7699026Sjulian 77170598Sjeffstatic void thread_zombie(struct thread *); 78170598Sjeff 79216314Sdavidxu#define TID_BUFFER_SIZE 1024 80216314Sdavidxu 81127794Smarcelstruct mtx tid_lock; 82143802Sphkstatic struct unrhdr *tid_unrhdr; 83216314Sdavidxustatic lwpid_t tid_buffer[TID_BUFFER_SIZE]; 84216314Sdavidxustatic int tid_head, tid_tail; 85213642Sdavidxustatic MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash"); 86213642Sdavidxu 87213642Sdavidxustruct tidhashhead *tidhashtbl; 88213642Sdavidxuu_long tidhash; 89213642Sdavidxustruct rwlock tidhash_lock; 90213642Sdavidxu 91216314Sdavidxustatic lwpid_t 92216314Sdavidxutid_alloc(void) 93216314Sdavidxu{ 94216314Sdavidxu lwpid_t tid; 95216314Sdavidxu 96216314Sdavidxu tid = alloc_unr(tid_unrhdr); 97216314Sdavidxu if (tid != -1) 98216314Sdavidxu return (tid); 99216314Sdavidxu mtx_lock(&tid_lock); 100216314Sdavidxu if (tid_head == tid_tail) { 101216314Sdavidxu mtx_unlock(&tid_lock); 102216314Sdavidxu return (-1); 103216314Sdavidxu } 104240951Skib tid = tid_buffer[tid_head]; 105240951Skib tid_head = (tid_head + 1) % TID_BUFFER_SIZE; 106216314Sdavidxu mtx_unlock(&tid_lock); 107216314Sdavidxu return (tid); 108216314Sdavidxu} 109216314Sdavidxu 110216314Sdavidxustatic void 111216314Sdavidxutid_free(lwpid_t tid) 112216314Sdavidxu{ 113216314Sdavidxu lwpid_t tmp_tid = -1; 114216314Sdavidxu 115216314Sdavidxu mtx_lock(&tid_lock); 116216314Sdavidxu if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) { 117240951Skib tmp_tid = tid_buffer[tid_head]; 118240951Skib tid_head = (tid_head + 1) % TID_BUFFER_SIZE; 119216314Sdavidxu } 120240951Skib tid_buffer[tid_tail] = tid; 121240951Skib tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE; 122216314Sdavidxu 
mtx_unlock(&tid_lock); 123216314Sdavidxu if (tmp_tid != -1) 124216314Sdavidxu free_unr(tid_unrhdr, tmp_tid); 125216314Sdavidxu} 126216314Sdavidxu 127127794Smarcel/* 128107719Sjulian * Prepare a thread for use. 12999026Sjulian */ 130132987Sgreenstatic int 131132987Sgreenthread_ctor(void *mem, int size, void *arg, int flags) 13299026Sjulian{ 13399026Sjulian struct thread *td; 13499026Sjulian 13599026Sjulian td = (struct thread *)mem; 136103216Sjulian td->td_state = TDS_INACTIVE; 137135573Sjhb td->td_oncpu = NOCPU; 138130269Sjmallett 139216314Sdavidxu td->td_tid = tid_alloc(); 140143840Sphk 141130269Sjmallett /* 142130269Sjmallett * Note that td_critnest begins life as 1 because the thread is not 143130269Sjmallett * running and is thereby implicitly waiting to be on the receiving 144170296Sjeff * end of a context switch. 145130269Sjmallett */ 146118442Sjhb td->td_critnest = 1; 147216313Sdavidxu td->td_lend_user_pri = PRI_MAX; 148173631Srrs EVENTHANDLER_INVOKE(thread_ctor, td); 149155195Srwatson#ifdef AUDIT 150155195Srwatson audit_thread_alloc(td); 151155195Srwatson#endif 152161678Sdavidxu umtx_thread_alloc(td); 153132987Sgreen return (0); 15499026Sjulian} 15599026Sjulian 15699026Sjulian/* 15799026Sjulian * Reclaim a thread after use. 15899026Sjulian */ 15999026Sjulianstatic void 16099026Sjulianthread_dtor(void *mem, int size, void *arg) 16199026Sjulian{ 162127794Smarcel struct thread *td; 16399026Sjulian 16499026Sjulian td = (struct thread *)mem; 16599026Sjulian 16699026Sjulian#ifdef INVARIANTS 16799026Sjulian /* Verify that this thread is in a safe state to free. */ 16899026Sjulian switch (td->td_state) { 169103216Sjulian case TDS_INHIBITED: 170103216Sjulian case TDS_RUNNING: 171103216Sjulian case TDS_CAN_RUN: 17299026Sjulian case TDS_RUNQ: 17399026Sjulian /* 17499026Sjulian * We must never unlink a thread that is in one of 17599026Sjulian * these states, because it is currently active. 
17699026Sjulian */ 17799026Sjulian panic("bad state for thread unlinking"); 17899026Sjulian /* NOTREACHED */ 179103216Sjulian case TDS_INACTIVE: 18099026Sjulian break; 18199026Sjulian default: 18299026Sjulian panic("bad thread state"); 18399026Sjulian /* NOTREACHED */ 18499026Sjulian } 18599026Sjulian#endif 186155353Srwatson#ifdef AUDIT 187155353Srwatson audit_thread_free(td); 188155353Srwatson#endif 189185029Spjd /* Free all OSD associated to this thread. */ 190185029Spjd osd_thread_exit(td); 191185029Spjd 192173631Srrs EVENTHANDLER_INVOKE(thread_dtor, td); 193216314Sdavidxu tid_free(td->td_tid); 19499026Sjulian} 19599026Sjulian 19699026Sjulian/* 19799026Sjulian * Initialize type-stable parts of a thread (when newly created). 19899026Sjulian */ 199132987Sgreenstatic int 200132987Sgreenthread_init(void *mem, int size, int flags) 20199026Sjulian{ 202131149Smarcel struct thread *td; 20399026Sjulian 20499026Sjulian td = (struct thread *)mem; 205131149Smarcel 206126326Sjhb td->td_sleepqueue = sleepq_alloc(); 207122514Sjhb td->td_turnstile = turnstile_alloc(); 208236317Skib td->td_rlqe = NULL; 209173631Srrs EVENTHANDLER_INVOKE(thread_init, td); 210107126Sjeff td->td_sched = (struct td_sched *)&td[1]; 211161678Sdavidxu umtx_thread_init(td); 212173361Skib td->td_kstack = 0; 213132987Sgreen return (0); 21499026Sjulian} 21599026Sjulian 21699026Sjulian/* 21799026Sjulian * Tear down type-stable parts of a thread (just before being discarded). 
21899026Sjulian */ 21999026Sjulianstatic void 22099026Sjulianthread_fini(void *mem, int size) 22199026Sjulian{ 222131149Smarcel struct thread *td; 22399026Sjulian 22499026Sjulian td = (struct thread *)mem; 225173631Srrs EVENTHANDLER_INVOKE(thread_fini, td); 226236317Skib rlqentry_free(td->td_rlqe); 227122514Sjhb turnstile_free(td->td_turnstile); 228126326Sjhb sleepq_free(td->td_sleepqueue); 229161678Sdavidxu umtx_thread_fini(td); 230174647Sjeff seltdfini(td); 23199026Sjulian} 232111028Sjeff 233107126Sjeff/* 234111028Sjeff * For a newly created process, 235111028Sjeff * link up all the structures and its initial threads etc. 236134791Sjulian * called from: 237268351Smarcel * {arch}/{arch}/machdep.c {arch}_init(), init386() etc. 238134791Sjulian * proc_dtor() (should go away) 239134791Sjulian * proc_init() 240105854Sjulian */ 241105854Sjulianvoid 242173361Skibproc_linkup0(struct proc *p, struct thread *td) 243173361Skib{ 244173361Skib TAILQ_INIT(&p->p_threads); /* all threads in proc */ 245173361Skib proc_linkup(p, td); 246173361Skib} 247173361Skib 248173361Skibvoid 249163709Sjbproc_linkup(struct proc *p, struct thread *td) 250105854Sjulian{ 251170296Sjeff 252151316Sdavidxu sigqueue_init(&p->p_sigqueue, p); 253153253Sdavidxu p->p_ksi = ksiginfo_alloc(1); 254153253Sdavidxu if (p->p_ksi != NULL) { 255153253Sdavidxu /* XXX p_ksi may be null if ksiginfo zone is not ready */ 256153253Sdavidxu p->p_ksi->ksi_flags = KSI_EXT | KSI_INS; 257152185Sdavidxu } 258152948Sdavidxu LIST_INIT(&p->p_mqnotifier); 259105854Sjulian p->p_numthreads = 0; 260163709Sjb thread_link(td, p); 261105854Sjulian} 262105854Sjulian 263111028Sjeff/* 26499026Sjulian * Initialize global thread allocation resources. 26599026Sjulian */ 26699026Sjulianvoid 26799026Sjulianthreadinit(void) 26899026Sjulian{ 26999026Sjulian 270143802Sphk mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF); 271239301Skib 272239301Skib /* 273239328Skib * pid_max cannot be greater than PID_MAX. 
274239301Skib * leave one number for thread0. 275239301Skib */ 276174848Sjulian tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock); 277143802Sphk 278107126Sjeff thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), 27999026Sjulian thread_ctor, thread_dtor, thread_init, thread_fini, 280167944Sjhb 16 - 1, 0); 281213642Sdavidxu tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash); 282213642Sdavidxu rw_init(&tidhash_lock, "tidhash"); 28399026Sjulian} 28499026Sjulian 28599026Sjulian/* 286170598Sjeff * Place an unused thread on the zombie list. 287164936Sjulian * Use the slpq as that must be unused by now. 28899026Sjulian */ 28999026Sjulianvoid 290170598Sjeffthread_zombie(struct thread *td) 29199026Sjulian{ 292170296Sjeff mtx_lock_spin(&zombie_lock); 293164936Sjulian TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq); 294170296Sjeff mtx_unlock_spin(&zombie_lock); 29599026Sjulian} 29699026Sjulian 297103410Smini/* 298170598Sjeff * Release a thread that has exited after cpu_throw(). 299170598Sjeff */ 300170598Sjeffvoid 301170598Sjeffthread_stash(struct thread *td) 302170598Sjeff{ 303170598Sjeff atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); 304170598Sjeff thread_zombie(td); 305170598Sjeff} 306170598Sjeff 307170598Sjeff/* 308177091Sjeff * Reap zombie resources. 30999026Sjulian */ 31099026Sjulianvoid 31199026Sjulianthread_reap(void) 31299026Sjulian{ 313105854Sjulian struct thread *td_first, *td_next; 31499026Sjulian 31599026Sjulian /* 316111028Sjeff * Don't even bother to lock if none at this instant, 317111028Sjeff * we really don't care about the next instant.. 
31899026Sjulian */ 319163709Sjb if (!TAILQ_EMPTY(&zombie_threads)) { 320170296Sjeff mtx_lock_spin(&zombie_lock); 321105854Sjulian td_first = TAILQ_FIRST(&zombie_threads); 322105854Sjulian if (td_first) 323105854Sjulian TAILQ_INIT(&zombie_threads); 324170296Sjeff mtx_unlock_spin(&zombie_lock); 325105854Sjulian while (td_first) { 326164936Sjulian td_next = TAILQ_NEXT(td_first, td_slpq); 327111028Sjeff if (td_first->td_ucred) 328111028Sjeff crfree(td_first->td_ucred); 329105854Sjulian thread_free(td_first); 330105854Sjulian td_first = td_next; 33199026Sjulian } 33299026Sjulian } 33399026Sjulian} 33499026Sjulian 33599026Sjulian/* 33699026Sjulian * Allocate a thread. 33799026Sjulian */ 33899026Sjulianstruct thread * 339196730Skibthread_alloc(int pages) 34099026Sjulian{ 341173361Skib struct thread *td; 342163709Sjb 34399026Sjulian thread_reap(); /* check if any zombies to get */ 344173361Skib 345173361Skib td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); 346173361Skib KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); 347196730Skib if (!vm_thread_new(td, pages)) { 348173361Skib uma_zfree(thread_zone, td); 349173361Skib return (NULL); 350173361Skib } 351173615Smarcel cpu_thread_alloc(td); 352173361Skib return (td); 35399026Sjulian} 35499026Sjulian 355196730Skibint 356196730Skibthread_alloc_stack(struct thread *td, int pages) 357196730Skib{ 358103367Sjulian 359196730Skib KASSERT(td->td_kstack == 0, 360196730Skib ("thread_alloc_stack called on a thread with kstack")); 361196730Skib if (!vm_thread_new(td, pages)) 362196730Skib return (0); 363196730Skib cpu_thread_alloc(td); 364196730Skib return (1); 365196730Skib} 366196730Skib 367103367Sjulian/* 36899026Sjulian * Deallocate a thread. 
36999026Sjulian */ 37099026Sjulianvoid 37199026Sjulianthread_free(struct thread *td) 37299026Sjulian{ 373189845Sjeff 374189845Sjeff lock_profile_thread_exit(td); 375177369Sjeff if (td->td_cpuset) 376177369Sjeff cpuset_rel(td->td_cpuset); 377176730Sjeff td->td_cpuset = NULL; 378173615Smarcel cpu_thread_free(td); 379173361Skib if (td->td_kstack != 0) 380173361Skib vm_thread_dispose(td); 38199026Sjulian uma_zfree(thread_zone, td); 38299026Sjulian} 38399026Sjulian 38499026Sjulian/* 38599026Sjulian * Discard the current thread and exit from its context. 386130355Sjulian * Always called with scheduler locked. 38799026Sjulian * 38899026Sjulian * Because we can't free a thread while we're operating under its context, 389107719Sjulian * push the current thread into our CPU's deadthread holder. This means 390107719Sjulian * we needn't worry about someone else grabbing our context before we 391177091Sjeff * do a cpu_throw(). 39299026Sjulian */ 39399026Sjulianvoid 39499026Sjulianthread_exit(void) 39599026Sjulian{ 396229429Sjhb uint64_t runtime, new_switchtime; 39799026Sjulian struct thread *td; 398170174Sjeff struct thread *td2; 39999026Sjulian struct proc *p; 400182011Sjhb int wakeup_swapper; 40199026Sjulian 40299026Sjulian td = curthread; 40399026Sjulian p = td->td_proc; 40499026Sjulian 405170296Sjeff PROC_SLOCK_ASSERT(p, MA_OWNED); 406134791Sjulian mtx_assert(&Giant, MA_NOTOWNED); 407170296Sjeff 408134791Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 409102581Sjulian KASSERT(p != NULL, ("thread exiting without a process")); 410133234Srwatson CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td, 411173601Sjulian (long)p->p_pid, td->td_name); 412151316Sdavidxu KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending")); 41399026Sjulian 414155376Srwatson#ifdef AUDIT 415155376Srwatson AUDIT_SYSCALL_EXIT(0, td); 416155376Srwatson#endif 417161678Sdavidxu umtx_thread_exit(td); 418134791Sjulian /* 419134791Sjulian * drop FPU & debug register state storage, or any other 
420134791Sjulian * architecture specific resources that 421134791Sjulian * would not be on a new untouched process. 422134791Sjulian */ 42399026Sjulian cpu_thread_exit(td); /* XXXSMP */ 42499026Sjulian 425134791Sjulian /* 426103002Sjulian * The last thread is left attached to the process 427103002Sjulian * So that the whole bundle gets recycled. Skip 428134791Sjulian * all this stuff if we never had threads. 429134791Sjulian * EXIT clears all sign of other threads when 430134791Sjulian * it goes to single threading, so the last thread always 431134791Sjulian * takes the short path. 432102581Sjulian */ 433134791Sjulian if (p->p_flag & P_HADTHREADS) { 434134791Sjulian if (p->p_numthreads > 1) { 435271000Skib atomic_add_int(&td->td_proc->p_exitthreads, 1); 436134791Sjulian thread_unlink(td); 437170174Sjeff td2 = FIRST_THREAD_IN_PROC(p); 438170174Sjeff sched_exit_thread(td2, td); 439134791Sjulian 440134791Sjulian /* 441134791Sjulian * The test below is NOT true if we are the 442207606Skib * sole exiting thread. P_STOPPED_SINGLE is unset 443134791Sjulian * in exit1() after it is the only survivor. 444134791Sjulian */ 445134791Sjulian if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 446134791Sjulian if (p->p_numthreads == p->p_suspcount) { 447170296Sjeff thread_lock(p->p_singlethread); 448182011Sjhb wakeup_swapper = thread_unsuspend_one( 449182011Sjhb p->p_singlethread); 450170296Sjeff thread_unlock(p->p_singlethread); 451182011Sjhb if (wakeup_swapper) 452182011Sjhb kick_proc0(); 453134791Sjulian } 454103002Sjulian } 455104695Sjulian 456134791Sjulian PCPU_SET(deadthread, td); 457134791Sjulian } else { 458134791Sjulian /* 459134791Sjulian * The last thread is exiting.. 
but not through exit() 460134791Sjulian */ 461134791Sjulian panic ("thread_exit: Last thread exiting on its own"); 462119488Sdavidxu } 463170296Sjeff } 464198464Sjkoshy#ifdef HWPMC_HOOKS 465198464Sjkoshy /* 466198464Sjkoshy * If this thread is part of a process that is being tracked by hwpmc(4), 467198464Sjkoshy * inform the module of the thread's impending exit. 468198464Sjkoshy */ 469198464Sjkoshy if (PMC_PROC_IS_USING_PMCS(td->td_proc)) 470198464Sjkoshy PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); 471198464Sjkoshy#endif 472170296Sjeff PROC_UNLOCK(p); 473229429Sjhb 474229429Sjhb /* Do the same timestamp bookkeeping that mi_switch() would do. */ 475229429Sjhb new_switchtime = cpu_ticks(); 476229429Sjhb runtime = new_switchtime - PCPU_GET(switchtime); 477229429Sjhb td->td_runtime += runtime; 478229429Sjhb td->td_incruntime += runtime; 479229429Sjhb PCPU_SET(switchtime, new_switchtime); 480229429Sjhb PCPU_SET(switchticks, ticks); 481229429Sjhb PCPU_INC(cnt.v_swtch); 482229429Sjhb 483229429Sjhb /* Save our resource usage in our process. */ 484229429Sjhb td->td_ru.ru_nvcsw++; 485208488Skib ruxagg(p, td); 486229429Sjhb rucollect(&p->p_ru, &td->td_ru); 487229429Sjhb 488170296Sjeff thread_lock(td); 489170296Sjeff PROC_SUNLOCK(p); 490133396Sjulian td->td_state = TDS_INACTIVE; 491181695Sattilio#ifdef WITNESS 492181695Sattilio witness_thread_exit(td); 493181695Sattilio#endif 494133396Sjulian CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td); 495170296Sjeff sched_throw(td); 496112993Speter panic("I'm a teapot!"); 49799026Sjulian /* NOTREACHED */ 49899026Sjulian} 49999026Sjulian 500124350Sschweikh/* 501107719Sjulian * Do any thread specific cleanups that may be needed in wait() 502126932Speter * called with Giant, proc and schedlock not held. 
503107719Sjulian */ 504107719Sjulianvoid 505107719Sjulianthread_wait(struct proc *p) 506107719Sjulian{ 507107719Sjulian struct thread *td; 508107719Sjulian 509126932Speter mtx_assert(&Giant, MA_NOTOWNED); 510271000Skib KASSERT((p->p_numthreads == 1), ("multiple threads in thread_wait()")); 511271000Skib KASSERT((p->p_exitthreads == 0), ("p_exitthreads leaking")); 512170598Sjeff td = FIRST_THREAD_IN_PROC(p); 513170598Sjeff /* Lock the last thread so we spin until it exits cpu_throw(). */ 514170598Sjeff thread_lock(td); 515170598Sjeff thread_unlock(td); 516189845Sjeff lock_profile_thread_exit(td); 517176730Sjeff cpuset_rel(td->td_cpuset); 518176730Sjeff td->td_cpuset = NULL; 519170598Sjeff cpu_thread_clean(td); 520170598Sjeff crfree(td->td_ucred); 521107719Sjulian thread_reap(); /* check for zombie threads etc. */ 522107719Sjulian} 523107719Sjulian 52499026Sjulian/* 52599026Sjulian * Link a thread to a process. 526103002Sjulian * set up anything that needs to be initialized for it to 527103002Sjulian * be used by the process. 52899026Sjulian */ 52999026Sjulianvoid 530163709Sjbthread_link(struct thread *td, struct proc *p) 53199026Sjulian{ 53299026Sjulian 533170296Sjeff /* 534170296Sjeff * XXX This can't be enabled because it's called for proc0 before 535177368Sjeff * its lock has been created. 536177368Sjeff * PROC_LOCK_ASSERT(p, MA_OWNED); 537170296Sjeff */ 538111028Sjeff td->td_state = TDS_INACTIVE; 539111028Sjeff td->td_proc = p; 540172207Sjeff td->td_flags = TDF_INMEM; 54199026Sjulian 542103002Sjulian LIST_INIT(&td->td_contested); 543174629Sjeff LIST_INIT(&td->td_lprof[0]); 544174629Sjeff LIST_INIT(&td->td_lprof[1]); 545151316Sdavidxu sigqueue_init(&td->td_sigqueue, p); 546119137Ssam callout_init(&td->td_slpcallout, CALLOUT_MPSAFE); 547269095Sdeischen TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist); 54899026Sjulian p->p_numthreads++; 54999026Sjulian} 55099026Sjulian 551134791Sjulian/* 552136160Sjulian * Convert a process with one thread to an unthreaded process. 
553136160Sjulian */ 554136160Sjulianvoid 555136160Sjulianthread_unthread(struct thread *td) 556136160Sjulian{ 557136160Sjulian struct proc *p = td->td_proc; 558136160Sjulian 559136160Sjulian KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads")); 560163709Sjb p->p_flag &= ~P_HADTHREADS; 561136160Sjulian} 562136160Sjulian 563136160Sjulian/* 564136160Sjulian * Called from: 565134791Sjulian * thread_exit() 566134791Sjulian */ 567113641Sjulianvoid 568113641Sjulianthread_unlink(struct thread *td) 569124350Sschweikh{ 570113641Sjulian struct proc *p = td->td_proc; 571113920Sjhb 572177368Sjeff PROC_LOCK_ASSERT(p, MA_OWNED); 573113641Sjulian TAILQ_REMOVE(&p->p_threads, td, td_plist); 574113641Sjulian p->p_numthreads--; 575113641Sjulian /* could clear a few other things here */ 576163709Sjb /* Must NOT clear links to proc! */ 577124350Sschweikh} 578113641Sjulian 579195701Skibstatic int 580195701Skibcalc_remaining(struct proc *p, int mode) 581195701Skib{ 582195701Skib int remaining; 583195701Skib 584227657Skib PROC_LOCK_ASSERT(p, MA_OWNED); 585227657Skib PROC_SLOCK_ASSERT(p, MA_OWNED); 586195701Skib if (mode == SINGLE_EXIT) 587195701Skib remaining = p->p_numthreads; 588195701Skib else if (mode == SINGLE_BOUNDARY) 589195701Skib remaining = p->p_numthreads - p->p_boundary_count; 590195701Skib else if (mode == SINGLE_NO_EXIT) 591195701Skib remaining = p->p_numthreads - p->p_suspcount; 592195701Skib else 593195701Skib panic("calc_remaining: wrong mode %d", mode); 594195701Skib return (remaining); 595195701Skib} 596195701Skib 597111028Sjeff/* 59899026Sjulian * Enforce single-threading. 59999026Sjulian * 60099026Sjulian * Returns 1 if the caller must abort (another thread is waiting to 60199026Sjulian * exit the process or similar). Process is locked! 60299026Sjulian * Returns 0 when you are successfully the only thread running. 60399026Sjulian * A process has successfully single threaded in the suspend mode when 60499026Sjulian * There are no threads in user mode. 
Threads in the kernel must be 60599026Sjulian * allowed to continue until they get to the user boundary. They may even 60699026Sjulian * copy out their return values and data before suspending. They may however be 607160048Smaxim * accelerated in reaching the user boundary as we will wake up 60899026Sjulian * any sleeping threads that are interruptable. (PCATCH). 60999026Sjulian */ 61099026Sjulianint 611136177Sdavidxuthread_single(int mode) 61299026Sjulian{ 61399026Sjulian struct thread *td; 61499026Sjulian struct thread *td2; 61599026Sjulian struct proc *p; 616181334Sjhb int remaining, wakeup_swapper; 61799026Sjulian 61899026Sjulian td = curthread; 61999026Sjulian p = td->td_proc; 620126932Speter mtx_assert(&Giant, MA_NOTOWNED); 62199026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 62299026Sjulian 623134791Sjulian if ((p->p_flag & P_HADTHREADS) == 0) 62499026Sjulian return (0); 62599026Sjulian 626100648Sjulian /* Is someone already single threading? */ 627136177Sdavidxu if (p->p_singlethread != NULL && p->p_singlethread != td) 62899026Sjulian return (1); 62999026Sjulian 630136177Sdavidxu if (mode == SINGLE_EXIT) { 631136177Sdavidxu p->p_flag |= P_SINGLE_EXIT; 632136177Sdavidxu p->p_flag &= ~P_SINGLE_BOUNDARY; 633136177Sdavidxu } else { 634136177Sdavidxu p->p_flag &= ~P_SINGLE_EXIT; 635136177Sdavidxu if (mode == SINGLE_BOUNDARY) 636136177Sdavidxu p->p_flag |= P_SINGLE_BOUNDARY; 637136177Sdavidxu else 638136177Sdavidxu p->p_flag &= ~P_SINGLE_BOUNDARY; 639136177Sdavidxu } 640102950Sdavidxu p->p_flag |= P_STOPPED_SINGLE; 641184667Sdavidxu PROC_SLOCK(p); 64299026Sjulian p->p_singlethread = td; 643195701Skib remaining = calc_remaining(p, mode); 644130674Sdavidxu while (remaining != 1) { 645156942Sdavidxu if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) 646156942Sdavidxu goto stopme; 647181334Sjhb wakeup_swapper = 0; 64899026Sjulian FOREACH_THREAD_IN_PROC(p, td2) { 64999026Sjulian if (td2 == td) 65099026Sjulian continue; 651170296Sjeff thread_lock(td2); 652177471Sjeff td2->td_flags |= 
TDF_ASTPENDING | TDF_NEEDSUSPCHK; 653103216Sjulian if (TD_IS_INHIBITED(td2)) { 654136177Sdavidxu switch (mode) { 655136177Sdavidxu case SINGLE_EXIT: 656136177Sdavidxu if (TD_IS_SUSPENDED(td2)) 657182011Sjhb wakeup_swapper |= 658182011Sjhb thread_unsuspend_one(td2); 659105911Sjulian if (TD_ON_SLEEPQ(td2) && 660136177Sdavidxu (td2->td_flags & TDF_SINTR)) 661182011Sjhb wakeup_swapper |= 662181334Sjhb sleepq_abort(td2, EINTR); 663136177Sdavidxu break; 664136177Sdavidxu case SINGLE_BOUNDARY: 665183929Sdavidxu if (TD_IS_SUSPENDED(td2) && 666183929Sdavidxu !(td2->td_flags & TDF_BOUNDARY)) 667183929Sdavidxu wakeup_swapper |= 668183929Sdavidxu thread_unsuspend_one(td2); 669183929Sdavidxu if (TD_ON_SLEEPQ(td2) && 670183929Sdavidxu (td2->td_flags & TDF_SINTR)) 671183929Sdavidxu wakeup_swapper |= 672183929Sdavidxu sleepq_abort(td2, ERESTART); 673136177Sdavidxu break; 674195702Skib case SINGLE_NO_EXIT: 675195702Skib if (TD_IS_SUSPENDED(td2) && 676195702Skib !(td2->td_flags & TDF_BOUNDARY)) 677195702Skib wakeup_swapper |= 678195702Skib thread_unsuspend_one(td2); 679195702Skib if (TD_ON_SLEEPQ(td2) && 680195702Skib (td2->td_flags & TDF_SINTR)) 681195702Skib wakeup_swapper |= 682195702Skib sleepq_abort(td2, ERESTART); 683195702Skib break; 684183929Sdavidxu default: 685136177Sdavidxu break; 68699026Sjulian } 68799026Sjulian } 688155594Sdavidxu#ifdef SMP 689155594Sdavidxu else if (TD_IS_RUNNING(td2) && td != td2) { 690155594Sdavidxu forward_signal(td2); 691155594Sdavidxu } 692155594Sdavidxu#endif 693170296Sjeff thread_unlock(td2); 69499026Sjulian } 695181334Sjhb if (wakeup_swapper) 696181334Sjhb kick_proc0(); 697195701Skib remaining = calc_remaining(p, mode); 698130674Sdavidxu 699124350Sschweikh /* 700124350Sschweikh * Maybe we suspended some threads.. was it enough? 701105911Sjulian */ 702130674Sdavidxu if (remaining == 1) 703105911Sjulian break; 704105911Sjulian 705156942Sdavidxustopme: 70699026Sjulian /* 70799026Sjulian * Wake us up when everyone else has suspended. 
708100648Sjulian * In the mean time we suspend as well. 70999026Sjulian */ 710170296Sjeff thread_suspend_switch(td); 711195701Skib remaining = calc_remaining(p, mode); 71299026Sjulian } 713136177Sdavidxu if (mode == SINGLE_EXIT) { 714135269Sjulian /* 715135269Sjulian * We have gotten rid of all the other threads and we 716135269Sjulian * are about to either exit or exec. In either case, 717240204Sjhb * we try our utmost to revert to being a non-threaded 718135269Sjulian * process. 719135269Sjulian */ 720136160Sjulian p->p_singlethread = NULL; 721137279Sdavidxu p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT); 722136160Sjulian thread_unthread(td); 723271000Skib 724271000Skib /* 725271000Skib * Wait for any remaining threads to exit cpu_throw(). 726271000Skib */ 727271000Skib while (p->p_exitthreads != 0) { 728271000Skib PROC_SUNLOCK(p); 729271000Skib PROC_UNLOCK(p); 730271000Skib sched_relinquish(td); 731271000Skib PROC_LOCK(p); 732271000Skib PROC_SLOCK(p); 733271000Skib } 734111028Sjeff } 735184667Sdavidxu PROC_SUNLOCK(p); 73699026Sjulian return (0); 73799026Sjulian} 73899026Sjulian 73999026Sjulian/* 74099026Sjulian * Called in from locations that can safely check to see 74199026Sjulian * whether we have to suspend or at least throttle for a 74299026Sjulian * single-thread event (e.g. fork). 74399026Sjulian * 74499026Sjulian * Such locations include userret(). 74599026Sjulian * If the "return_instead" argument is non zero, the thread must be able to 74699026Sjulian * accept 0 (caller may continue), or 1 (caller must abort) as a result. 74799026Sjulian * 74899026Sjulian * The 'return_instead' argument tells the function if it may do a 74999026Sjulian * thread_exit() or suspend, or whether the caller must abort and back 75099026Sjulian * out instead. 
75199026Sjulian * 75299026Sjulian * If the thread that set the single_threading request has set the 75399026Sjulian * P_SINGLE_EXIT bit in the process flags then this call will never return 75499026Sjulian * if 'return_instead' is false, but will exit. 75599026Sjulian * 75699026Sjulian * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 75799026Sjulian *---------------+--------------------+--------------------- 75899026Sjulian * 0 | returns 0 | returns 0 or 1 759246996Sjhb * | when ST ends | immediately 76099026Sjulian *---------------+--------------------+--------------------- 76199026Sjulian * 1 | thread exits | returns 1 762246996Sjhb * | | immediately 76399026Sjulian * 0 = thread_exit() or suspension ok, 76499026Sjulian * other = return error instead of stopping the thread. 76599026Sjulian * 76699026Sjulian * While a full suspension is under effect, even a single threading 76799026Sjulian * thread would be suspended if it made this call (but it shouldn't). 76899026Sjulian * This call should only be made from places where 769124350Sschweikh * thread_exit() would be safe as that may be the outcome unless 77099026Sjulian * return_instead is set. 77199026Sjulian */ 77299026Sjulianint 77399026Sjulianthread_suspend_check(int return_instead) 77499026Sjulian{ 775104502Sjmallett struct thread *td; 776104502Sjmallett struct proc *p; 777182011Sjhb int wakeup_swapper; 77899026Sjulian 77999026Sjulian td = curthread; 78099026Sjulian p = td->td_proc; 781126932Speter mtx_assert(&Giant, MA_NOTOWNED); 78299026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 783132087Sdavidxu while (P_SHOULDSTOP(p) || 784183911Sdavidxu ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND))) { 785102950Sdavidxu if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 78699026Sjulian KASSERT(p->p_singlethread != NULL, 78799026Sjulian ("singlethread not set")); 78899026Sjulian /* 789100648Sjulian * The only suspension in action is a 790100648Sjulian * single-threading. Single threader need not stop. 
791124350Sschweikh * XXX Should be safe to access unlocked 792100646Sjulian * as it can only be set to be true by us. 79399026Sjulian */ 794100648Sjulian if (p->p_singlethread == td) 79599026Sjulian return (0); /* Exempt from stopping. */ 796124350Sschweikh } 797134498Sdavidxu if ((p->p_flag & P_SINGLE_EXIT) && return_instead) 798155741Sdavidxu return (EINTR); 79999026Sjulian 800136177Sdavidxu /* Should we goto user boundary if we didn't come from there? */ 801136177Sdavidxu if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && 802136177Sdavidxu (p->p_flag & P_SINGLE_BOUNDARY) && return_instead) 803155741Sdavidxu return (ERESTART); 804136177Sdavidxu 80599026Sjulian /* 806248584Sjhb * Ignore suspend requests for stop signals if they 807248584Sjhb * are deferred. 808248584Sjhb */ 809248584Sjhb if (P_SHOULDSTOP(p) == P_STOPPED_SIG && 810248584Sjhb td->td_flags & TDF_SBDRY) { 811248584Sjhb KASSERT(return_instead, 812248584Sjhb ("TDF_SBDRY set for unsafe thread_suspend_check")); 813248584Sjhb return (0); 814248584Sjhb } 815248584Sjhb 816248584Sjhb /* 81799026Sjulian * If the process is waiting for us to exit, 81899026Sjulian * this thread should just suicide. 819102950Sdavidxu * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. 
82099026Sjulian */ 821213642Sdavidxu if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { 822213642Sdavidxu PROC_UNLOCK(p); 823213642Sdavidxu tidhash_remove(td); 824213642Sdavidxu PROC_LOCK(p); 825213950Sdavidxu tdsigcleanup(td); 826213642Sdavidxu PROC_SLOCK(p); 827213950Sdavidxu thread_stopped(p); 828134791Sjulian thread_exit(); 829213642Sdavidxu } 830213950Sdavidxu 831213950Sdavidxu PROC_SLOCK(p); 832213950Sdavidxu thread_stopped(p); 833170296Sjeff if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 834170296Sjeff if (p->p_numthreads == p->p_suspcount + 1) { 835170296Sjeff thread_lock(p->p_singlethread); 836182011Sjhb wakeup_swapper = 837182011Sjhb thread_unsuspend_one(p->p_singlethread); 838170296Sjeff thread_unlock(p->p_singlethread); 839182011Sjhb if (wakeup_swapper) 840182011Sjhb kick_proc0(); 841170296Sjeff } 842170296Sjeff } 843184667Sdavidxu PROC_UNLOCK(p); 844184199Sdavidxu thread_lock(td); 84599026Sjulian /* 84699026Sjulian * When a thread suspends, it just 847164936Sjulian * gets taken off all queues. 
84899026Sjulian */ 849103216Sjulian thread_suspend_one(td); 850136177Sdavidxu if (return_instead == 0) { 851136177Sdavidxu p->p_boundary_count++; 852136177Sdavidxu td->td_flags |= TDF_BOUNDARY; 853136177Sdavidxu } 854184667Sdavidxu PROC_SUNLOCK(p); 855178272Sjeff mi_switch(SW_INVOL | SWT_SUSPEND, NULL); 856170296Sjeff if (return_instead == 0) 857136177Sdavidxu td->td_flags &= ~TDF_BOUNDARY; 858170296Sjeff thread_unlock(td); 85999026Sjulian PROC_LOCK(p); 860227657Skib if (return_instead == 0) { 861227657Skib PROC_SLOCK(p); 862170296Sjeff p->p_boundary_count--; 863227657Skib PROC_SUNLOCK(p); 864227657Skib } 86599026Sjulian } 86699026Sjulian return (0); 86799026Sjulian} 86899026Sjulian 869102898Sdavidxuvoid 870170296Sjeffthread_suspend_switch(struct thread *td) 871170296Sjeff{ 872170296Sjeff struct proc *p; 873170296Sjeff 874170296Sjeff p = td->td_proc; 875170296Sjeff KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 876170296Sjeff PROC_LOCK_ASSERT(p, MA_OWNED); 877184667Sdavidxu PROC_SLOCK_ASSERT(p, MA_OWNED); 878170296Sjeff /* 879170296Sjeff * We implement thread_suspend_one in stages here to avoid 880170296Sjeff * dropping the proc lock while the thread lock is owned. 
881170296Sjeff */ 882170296Sjeff thread_stopped(p); 883170296Sjeff p->p_suspcount++; 884184667Sdavidxu PROC_UNLOCK(p); 885184199Sdavidxu thread_lock(td); 886177471Sjeff td->td_flags &= ~TDF_NEEDSUSPCHK; 887170296Sjeff TD_SET_SUSPENDED(td); 888177085Sjeff sched_sleep(td, 0); 889184667Sdavidxu PROC_SUNLOCK(p); 890170296Sjeff DROP_GIANT(); 891178272Sjeff mi_switch(SW_VOL | SWT_SUSPEND, NULL); 892170296Sjeff thread_unlock(td); 893170296Sjeff PICKUP_GIANT(); 894170296Sjeff PROC_LOCK(p); 895184667Sdavidxu PROC_SLOCK(p); 896170296Sjeff} 897170296Sjeff 898170296Sjeffvoid 899102898Sdavidxuthread_suspend_one(struct thread *td) 900102898Sdavidxu{ 901102898Sdavidxu struct proc *p = td->td_proc; 902102898Sdavidxu 903184667Sdavidxu PROC_SLOCK_ASSERT(p, MA_OWNED); 904170296Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 905112071Sdavidxu KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); 906102898Sdavidxu p->p_suspcount++; 907177471Sjeff td->td_flags &= ~TDF_NEEDSUSPCHK; 908103216Sjulian TD_SET_SUSPENDED(td); 909177085Sjeff sched_sleep(td, 0); 910102898Sdavidxu} 911102898Sdavidxu 912182011Sjhbint 913102898Sdavidxuthread_unsuspend_one(struct thread *td) 914102898Sdavidxu{ 915102898Sdavidxu struct proc *p = td->td_proc; 916102898Sdavidxu 917184667Sdavidxu PROC_SLOCK_ASSERT(p, MA_OWNED); 918170296Sjeff THREAD_LOCK_ASSERT(td, MA_OWNED); 919164936Sjulian KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); 920103216Sjulian TD_CLR_SUSPENDED(td); 921102898Sdavidxu p->p_suspcount--; 922182011Sjhb return (setrunnable(td)); 923102898Sdavidxu} 924102898Sdavidxu 92599026Sjulian/* 92699026Sjulian * Allow all threads blocked by single threading to continue running. 
92799026Sjulian */ 92899026Sjulianvoid 92999026Sjulianthread_unsuspend(struct proc *p) 93099026Sjulian{ 93199026Sjulian struct thread *td; 932182011Sjhb int wakeup_swapper; 93399026Sjulian 93499026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 935184667Sdavidxu PROC_SLOCK_ASSERT(p, MA_OWNED); 936182011Sjhb wakeup_swapper = 0; 93799026Sjulian if (!P_SHOULDSTOP(p)) { 938164936Sjulian FOREACH_THREAD_IN_PROC(p, td) { 939170296Sjeff thread_lock(td); 940164936Sjulian if (TD_IS_SUSPENDED(td)) { 941182011Sjhb wakeup_swapper |= thread_unsuspend_one(td); 942164936Sjulian } 943170296Sjeff thread_unlock(td); 94499026Sjulian } 945102950Sdavidxu } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) && 94699026Sjulian (p->p_numthreads == p->p_suspcount)) { 94799026Sjulian /* 94899026Sjulian * Stopping everything also did the job for the single 94999026Sjulian * threading request. Now we've downgraded to single-threaded, 95099026Sjulian * let it continue. 95199026Sjulian */ 952170296Sjeff thread_lock(p->p_singlethread); 953182011Sjhb wakeup_swapper = thread_unsuspend_one(p->p_singlethread); 954170296Sjeff thread_unlock(p->p_singlethread); 95599026Sjulian } 956182011Sjhb if (wakeup_swapper) 957182011Sjhb kick_proc0(); 95899026Sjulian} 95999026Sjulian 960134791Sjulian/* 961134791Sjulian * End the single threading mode.. 
962134791Sjulian */ 96399026Sjulianvoid 96499026Sjulianthread_single_end(void) 96599026Sjulian{ 96699026Sjulian struct thread *td; 96799026Sjulian struct proc *p; 968182011Sjhb int wakeup_swapper; 96999026Sjulian 97099026Sjulian td = curthread; 97199026Sjulian p = td->td_proc; 97299026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 973136177Sdavidxu p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY); 974184667Sdavidxu PROC_SLOCK(p); 97599026Sjulian p->p_singlethread = NULL; 976182011Sjhb wakeup_swapper = 0; 977102292Sjulian /* 978182011Sjhb * If there are other threads they may now run, 979102292Sjulian * unless of course there is a blanket 'stop order' 980102292Sjulian * on the process. The single threader must be allowed 981102292Sjulian * to continue however as this is a bad place to stop. 982102292Sjulian */ 983102292Sjulian if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) { 984164936Sjulian FOREACH_THREAD_IN_PROC(p, td) { 985170296Sjeff thread_lock(td); 986164936Sjulian if (TD_IS_SUSPENDED(td)) { 987182011Sjhb wakeup_swapper |= thread_unsuspend_one(td); 988164936Sjulian } 989170296Sjeff thread_unlock(td); 990102292Sjulian } 991102292Sjulian } 992184667Sdavidxu PROC_SUNLOCK(p); 993182011Sjhb if (wakeup_swapper) 994182011Sjhb kick_proc0(); 99599026Sjulian} 996128721Sdeischen 997151990Sdavidxustruct thread * 998151990Sdavidxuthread_find(struct proc *p, lwpid_t tid) 999151990Sdavidxu{ 1000151990Sdavidxu struct thread *td; 1001151990Sdavidxu 1002151990Sdavidxu PROC_LOCK_ASSERT(p, MA_OWNED); 1003151990Sdavidxu FOREACH_THREAD_IN_PROC(p, td) { 1004151990Sdavidxu if (td->td_tid == tid) 1005151990Sdavidxu break; 1006151990Sdavidxu } 1007151990Sdavidxu return (td); 1008151990Sdavidxu} 1009213642Sdavidxu 1010213642Sdavidxu/* Locate a thread by number; return with proc lock held. 
*/ 1011213642Sdavidxustruct thread * 1012213642Sdavidxutdfind(lwpid_t tid, pid_t pid) 1013213642Sdavidxu{ 1014213642Sdavidxu#define RUN_THRESH 16 1015213642Sdavidxu struct thread *td; 1016213642Sdavidxu int run = 0; 1017213642Sdavidxu 1018213642Sdavidxu rw_rlock(&tidhash_lock); 1019213642Sdavidxu LIST_FOREACH(td, TIDHASH(tid), td_hash) { 1020213642Sdavidxu if (td->td_tid == tid) { 1021213642Sdavidxu if (pid != -1 && td->td_proc->p_pid != pid) { 1022213642Sdavidxu td = NULL; 1023213642Sdavidxu break; 1024213642Sdavidxu } 1025219968Sjhb PROC_LOCK(td->td_proc); 1026213642Sdavidxu if (td->td_proc->p_state == PRS_NEW) { 1027219968Sjhb PROC_UNLOCK(td->td_proc); 1028213642Sdavidxu td = NULL; 1029213642Sdavidxu break; 1030213642Sdavidxu } 1031213642Sdavidxu if (run > RUN_THRESH) { 1032213642Sdavidxu if (rw_try_upgrade(&tidhash_lock)) { 1033213642Sdavidxu LIST_REMOVE(td, td_hash); 1034213642Sdavidxu LIST_INSERT_HEAD(TIDHASH(td->td_tid), 1035213642Sdavidxu td, td_hash); 1036213642Sdavidxu rw_wunlock(&tidhash_lock); 1037213642Sdavidxu return (td); 1038213642Sdavidxu } 1039213642Sdavidxu } 1040213642Sdavidxu break; 1041213642Sdavidxu } 1042213642Sdavidxu run++; 1043213642Sdavidxu } 1044213642Sdavidxu rw_runlock(&tidhash_lock); 1045213642Sdavidxu return (td); 1046213642Sdavidxu} 1047213642Sdavidxu 1048213642Sdavidxuvoid 1049213642Sdavidxutidhash_add(struct thread *td) 1050213642Sdavidxu{ 1051213642Sdavidxu rw_wlock(&tidhash_lock); 1052213950Sdavidxu LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); 1053213642Sdavidxu rw_wunlock(&tidhash_lock); 1054213642Sdavidxu} 1055213642Sdavidxu 1056213642Sdavidxuvoid 1057213642Sdavidxutidhash_remove(struct thread *td) 1058213642Sdavidxu{ 1059213642Sdavidxu rw_wlock(&tidhash_lock); 1060213950Sdavidxu LIST_REMOVE(td, td_hash); 1061213642Sdavidxu rw_wunlock(&tidhash_lock); 1062213642Sdavidxu} 1063