/* kern_thread.c — FreeBSD head, revision 177471 */
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 177471 2008-03-21 08:23:25Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * thread related storage.
 */
/* UMA zone from which all struct thread (+ per-scheduler data) come. */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

/* Exited threads awaiting reaping; protected by the spin lock below. */
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

/* Allocator of unique thread IDs (see threadinit()). */
struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 *
 * UMA constructor: runs each time a thread structure is handed out by
 * thread_alloc().  'mem' is the zone item; 'arg' and 'flags' follow the
 * uma ctor convention.  Always returns 0 (success).
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	/* Each live thread owns a unique tid until thread_dtor() frees it. */
	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 *
 * UMA destructor: paired with thread_ctor(); releases the tid and fires
 * the dtor eventhandler.  Under INVARIANTS, verifies the thread is truly
 * inactive before it can be recycled.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
14899026Sjulian */ 149132987Sgreenstatic int 150132987Sgreenthread_init(void *mem, int size, int flags) 15199026Sjulian{ 152131149Smarcel struct thread *td; 15399026Sjulian 15499026Sjulian td = (struct thread *)mem; 155131149Smarcel 156126326Sjhb td->td_sleepqueue = sleepq_alloc(); 157122514Sjhb td->td_turnstile = turnstile_alloc(); 158173631Srrs EVENTHANDLER_INVOKE(thread_init, td); 159107126Sjeff td->td_sched = (struct td_sched *)&td[1]; 160161678Sdavidxu umtx_thread_init(td); 161173361Skib td->td_kstack = 0; 162132987Sgreen return (0); 16399026Sjulian} 16499026Sjulian 16599026Sjulian/* 16699026Sjulian * Tear down type-stable parts of a thread (just before being discarded). 16799026Sjulian */ 16899026Sjulianstatic void 16999026Sjulianthread_fini(void *mem, int size) 17099026Sjulian{ 171131149Smarcel struct thread *td; 17299026Sjulian 17399026Sjulian td = (struct thread *)mem; 174173631Srrs EVENTHANDLER_INVOKE(thread_fini, td); 175122514Sjhb turnstile_free(td->td_turnstile); 176126326Sjhb sleepq_free(td->td_sleepqueue); 177161678Sdavidxu umtx_thread_fini(td); 178174647Sjeff seltdfini(td); 17999026Sjulian} 180111028Sjeff 181107126Sjeff/* 182111028Sjeff * For a newly created process, 183111028Sjeff * link up all the structures and its initial threads etc. 184134791Sjulian * called from: 185134791Sjulian * {arch}/{arch}/machdep.c ia64_init(), init386() etc. 
186134791Sjulian * proc_dtor() (should go away) 187134791Sjulian * proc_init() 188105854Sjulian */ 189105854Sjulianvoid 190173361Skibproc_linkup0(struct proc *p, struct thread *td) 191173361Skib{ 192173361Skib TAILQ_INIT(&p->p_threads); /* all threads in proc */ 193173361Skib proc_linkup(p, td); 194173361Skib} 195173361Skib 196173361Skibvoid 197163709Sjbproc_linkup(struct proc *p, struct thread *td) 198105854Sjulian{ 199170296Sjeff 200151316Sdavidxu sigqueue_init(&p->p_sigqueue, p); 201153253Sdavidxu p->p_ksi = ksiginfo_alloc(1); 202153253Sdavidxu if (p->p_ksi != NULL) { 203153253Sdavidxu /* XXX p_ksi may be null if ksiginfo zone is not ready */ 204153253Sdavidxu p->p_ksi->ksi_flags = KSI_EXT | KSI_INS; 205152185Sdavidxu } 206152948Sdavidxu LIST_INIT(&p->p_mqnotifier); 207105854Sjulian p->p_numthreads = 0; 208163709Sjb thread_link(td, p); 209105854Sjulian} 210105854Sjulian 211111028Sjeff/* 21299026Sjulian * Initialize global thread allocation resources. 21399026Sjulian */ 21499026Sjulianvoid 21599026Sjulianthreadinit(void) 21699026Sjulian{ 21799026Sjulian 218143802Sphk mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF); 219174848Sjulian /* leave one number for thread0 */ 220174848Sjulian tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock); 221143802Sphk 222107126Sjeff thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), 22399026Sjulian thread_ctor, thread_dtor, thread_init, thread_fini, 224167944Sjhb 16 - 1, 0); 22599026Sjulian} 22699026Sjulian 22799026Sjulian/* 228170598Sjeff * Place an unused thread on the zombie list. 229164936Sjulian * Use the slpq as that must be unused by now. 23099026Sjulian */ 23199026Sjulianvoid 232170598Sjeffthread_zombie(struct thread *td) 23399026Sjulian{ 234170296Sjeff mtx_lock_spin(&zombie_lock); 235164936Sjulian TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq); 236170296Sjeff mtx_unlock_spin(&zombie_lock); 23799026Sjulian} 23899026Sjulian 239103410Smini/* 240170598Sjeff * Release a thread that has exited after cpu_throw(). 
241170598Sjeff */ 242170598Sjeffvoid 243170598Sjeffthread_stash(struct thread *td) 244170598Sjeff{ 245170598Sjeff atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); 246170598Sjeff thread_zombie(td); 247170598Sjeff} 248170598Sjeff 249170598Sjeff/* 250177091Sjeff * Reap zombie resources. 25199026Sjulian */ 25299026Sjulianvoid 25399026Sjulianthread_reap(void) 25499026Sjulian{ 255105854Sjulian struct thread *td_first, *td_next; 25699026Sjulian 25799026Sjulian /* 258111028Sjeff * Don't even bother to lock if none at this instant, 259111028Sjeff * we really don't care about the next instant.. 26099026Sjulian */ 261163709Sjb if (!TAILQ_EMPTY(&zombie_threads)) { 262170296Sjeff mtx_lock_spin(&zombie_lock); 263105854Sjulian td_first = TAILQ_FIRST(&zombie_threads); 264105854Sjulian if (td_first) 265105854Sjulian TAILQ_INIT(&zombie_threads); 266170296Sjeff mtx_unlock_spin(&zombie_lock); 267105854Sjulian while (td_first) { 268164936Sjulian td_next = TAILQ_NEXT(td_first, td_slpq); 269111028Sjeff if (td_first->td_ucred) 270111028Sjeff crfree(td_first->td_ucred); 271105854Sjulian thread_free(td_first); 272105854Sjulian td_first = td_next; 27399026Sjulian } 27499026Sjulian } 27599026Sjulian} 27699026Sjulian 27799026Sjulian/* 27899026Sjulian * Allocate a thread. 27999026Sjulian */ 28099026Sjulianstruct thread * 28199026Sjulianthread_alloc(void) 28299026Sjulian{ 283173361Skib struct thread *td; 284163709Sjb 28599026Sjulian thread_reap(); /* check if any zombies to get */ 286173361Skib 287173361Skib td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); 288173361Skib KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); 289173361Skib if (!vm_thread_new(td, 0)) { 290173361Skib uma_zfree(thread_zone, td); 291173361Skib return (NULL); 292173361Skib } 293173615Smarcel cpu_thread_alloc(td); 294173361Skib return (td); 29599026Sjulian} 29699026Sjulian 297103367Sjulian 298103367Sjulian/* 29999026Sjulian * Deallocate a thread. 
 */
void
thread_free(struct thread *td)
{

	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 *
 * Entered with both the proc lock and the proc spin lock held; never
 * returns (ends in sched_throw()).
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);
			/* Give our scheduler slot to a surviving thread. */
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SNGL is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					/* We were the last straggler. */
					thread_lock(p->p_singlethread);
					thread_unsuspend_one(p->p_singlethread);
					thread_unlock(p->p_singlethread);
				}
			}

			/* thread_wait() spins on p_exitthreads reaching 0. */
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Save our tick information with both the thread and proc locked */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
466136160Sjulian */ 467136160Sjulianvoid 468136160Sjulianthread_unthread(struct thread *td) 469136160Sjulian{ 470136160Sjulian struct proc *p = td->td_proc; 471136160Sjulian 472136160Sjulian KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads")); 473163709Sjb p->p_flag &= ~P_HADTHREADS; 474136160Sjulian} 475136160Sjulian 476136160Sjulian/* 477136160Sjulian * Called from: 478134791Sjulian * thread_exit() 479134791Sjulian */ 480113641Sjulianvoid 481113641Sjulianthread_unlink(struct thread *td) 482124350Sschweikh{ 483113641Sjulian struct proc *p = td->td_proc; 484113920Sjhb 485177368Sjeff PROC_LOCK_ASSERT(p, MA_OWNED); 486113641Sjulian TAILQ_REMOVE(&p->p_threads, td, td_plist); 487113641Sjulian p->p_numthreads--; 488113641Sjulian /* could clear a few other things here */ 489163709Sjb /* Must NOT clear links to proc! */ 490124350Sschweikh} 491113641Sjulian 492111028Sjeff/* 49399026Sjulian * Enforce single-threading. 49499026Sjulian * 49599026Sjulian * Returns 1 if the caller must abort (another thread is waiting to 49699026Sjulian * exit the process or similar). Process is locked! 49799026Sjulian * Returns 0 when you are successfully the only thread running. 49899026Sjulian * A process has successfully single threaded in the suspend mode when 49999026Sjulian * There are no threads in user mode. Threads in the kernel must be 50099026Sjulian * allowed to continue until they get to the user boundary. They may even 50199026Sjulian * copy out their return values and data before suspending. They may however be 502160048Smaxim * accelerated in reaching the user boundary as we will wake up 50399026Sjulian * any sleeping threads that are interruptable. (PCATCH). 
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;	/* threads still to be herded to the target state */

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	/* Never threaded: nothing to do. */
	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	/* Record the requested mode in the process flags. */
	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	/*
	 * "remaining" counts the threads not yet in the state this mode
	 * requires; we loop until only the caller is left.
	 */
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			/* Make td2 visit thread_suspend_check() soon. */
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					/*
					 * NOTE(review): this clears
					 * TDF_DBSUSPEND on the caller (td),
					 * not on td2 — confirm against the
					 * upstream history before changing.
					 */
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				/* Poke the remote CPU so td2 sees the AST. */
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		/* Recount after the pass above. */
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				/* We are the last one; wake the requester. */
				thread_lock(p->p_singlethread);
				thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}

/*
 * Suspend the current thread and switch away, performing the proc-lock
 * bookkeeping inline so the proc lock is never dropped while a thread
 * lock is held.  Entered with proc lock + proc spin lock held; returns
 * with both held again.
 */
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

/*
 * Mark a thread suspended.  Caller holds the proc spin lock and the
 * thread lock; the thread must not already be suspended.
 */
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

/*
 * Clear a thread's suspension and make it runnable again.  Caller holds
 * the proc spin lock and the thread lock.
 */
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
79599026Sjulian */ 79699026Sjulianvoid 79799026Sjulianthread_unsuspend(struct proc *p) 79899026Sjulian{ 79999026Sjulian struct thread *td; 80099026Sjulian 80199026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 802170296Sjeff PROC_SLOCK_ASSERT(p, MA_OWNED); 80399026Sjulian if (!P_SHOULDSTOP(p)) { 804164936Sjulian FOREACH_THREAD_IN_PROC(p, td) { 805170296Sjeff thread_lock(td); 806164936Sjulian if (TD_IS_SUSPENDED(td)) { 807164936Sjulian thread_unsuspend_one(td); 808164936Sjulian } 809170296Sjeff thread_unlock(td); 81099026Sjulian } 811102950Sdavidxu } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) && 81299026Sjulian (p->p_numthreads == p->p_suspcount)) { 81399026Sjulian /* 81499026Sjulian * Stopping everything also did the job for the single 81599026Sjulian * threading request. Now we've downgraded to single-threaded, 81699026Sjulian * let it continue. 81799026Sjulian */ 818170296Sjeff thread_lock(p->p_singlethread); 819102898Sdavidxu thread_unsuspend_one(p->p_singlethread); 820170296Sjeff thread_unlock(p->p_singlethread); 82199026Sjulian } 82299026Sjulian} 82399026Sjulian 824134791Sjulian/* 825134791Sjulian * End the single threading mode.. 826134791Sjulian */ 82799026Sjulianvoid 82899026Sjulianthread_single_end(void) 82999026Sjulian{ 83099026Sjulian struct thread *td; 83199026Sjulian struct proc *p; 83299026Sjulian 83399026Sjulian td = curthread; 83499026Sjulian p = td->td_proc; 83599026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED); 836136177Sdavidxu p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY); 837170296Sjeff PROC_SLOCK(p); 83899026Sjulian p->p_singlethread = NULL; 839102292Sjulian /* 840102292Sjulian * If there are other threads they mey now run, 841102292Sjulian * unless of course there is a blanket 'stop order' 842102292Sjulian * on the process. The single threader must be allowed 843102292Sjulian * to continue however as this is a bad place to stop. 
844102292Sjulian */ 845102292Sjulian if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) { 846164936Sjulian FOREACH_THREAD_IN_PROC(p, td) { 847170296Sjeff thread_lock(td); 848164936Sjulian if (TD_IS_SUSPENDED(td)) { 849164936Sjulian thread_unsuspend_one(td); 850164936Sjulian } 851170296Sjeff thread_unlock(td); 852102292Sjulian } 853102292Sjulian } 854170296Sjeff PROC_SUNLOCK(p); 85599026Sjulian} 856128721Sdeischen 857151990Sdavidxustruct thread * 858151990Sdavidxuthread_find(struct proc *p, lwpid_t tid) 859151990Sdavidxu{ 860151990Sdavidxu struct thread *td; 861151990Sdavidxu 862151990Sdavidxu PROC_LOCK_ASSERT(p, MA_OWNED); 863151990Sdavidxu FOREACH_THREAD_IN_PROC(p, td) { 864151990Sdavidxu if (td->td_tid == tid) 865151990Sdavidxu break; 866151990Sdavidxu } 867151990Sdavidxu return (td); 868151990Sdavidxu} 869