kern_thread.c revision 181334
1139804Simp/*- 299026Sjulian * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>. 399026Sjulian * All rights reserved. 499026Sjulian * 599026Sjulian * Redistribution and use in source and binary forms, with or without 699026Sjulian * modification, are permitted provided that the following conditions 799026Sjulian * are met: 899026Sjulian * 1. Redistributions of source code must retain the above copyright 999026Sjulian * notice(s), this list of conditions and the following disclaimer as 10124350Sschweikh * the first lines of this file unmodified other than the possible 1199026Sjulian * addition of one or more copyright notices. 1299026Sjulian * 2. Redistributions in binary form must reproduce the above copyright 1399026Sjulian * notice(s), this list of conditions and the following disclaimer in the 1499026Sjulian * documentation and/or other materials provided with the distribution. 1599026Sjulian * 1699026Sjulian * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 1799026Sjulian * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 1899026Sjulian * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 1999026Sjulian * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 2099026Sjulian * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 2199026Sjulian * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 2299026Sjulian * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 2399026Sjulian * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2499026Sjulian * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2599026Sjulian * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 2699026Sjulian * DAMAGE. 
2799026Sjulian */ 2899026Sjulian 29116182Sobrien#include <sys/cdefs.h> 30116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 181334 2008-08-05 20:02:31Z jhb $"); 31116182Sobrien 3299026Sjulian#include <sys/param.h> 3399026Sjulian#include <sys/systm.h> 3499026Sjulian#include <sys/kernel.h> 3599026Sjulian#include <sys/lock.h> 3699026Sjulian#include <sys/mutex.h> 3799026Sjulian#include <sys/proc.h> 38156705Sdavidxu#include <sys/resourcevar.h> 39130355Sjulian#include <sys/smp.h> 4099026Sjulian#include <sys/sysctl.h> 41107126Sjeff#include <sys/sched.h> 42126326Sjhb#include <sys/sleepqueue.h> 43174647Sjeff#include <sys/selinfo.h> 44122514Sjhb#include <sys/turnstile.h> 4599026Sjulian#include <sys/ktr.h> 46143149Sdavidxu#include <sys/umtx.h> 47176730Sjeff#include <sys/cpuset.h> 4899026Sjulian 49155195Srwatson#include <security/audit/audit.h> 50155195Srwatson 5199026Sjulian#include <vm/vm.h> 52116355Salc#include <vm/vm_extern.h> 5399026Sjulian#include <vm/uma.h> 54173631Srrs#include <sys/eventhandler.h> 5599026Sjulian 5699026Sjulian/* 57163709Sjb * thread related storage. 
 */
/* UMA zone from which all struct thread allocations are drawn. */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

/* Upper bound on the number of threads any one process may create. */
int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

/* Count of times thread creation hit the per-process limit (read-only). */
int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

/*
 * Exited threads parked here until thread_reap() frees them.
 * Protected by the zombie_lock spin mutex.
 */
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

/* Lock and unit-number allocator backing per-thread IDs (td_tid). */
struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 *
 * UMA constructor for the thread zone (registered in threadinit()); runs
 * each time a thread is handed out from the zone.  Assigns a fresh thread
 * ID and resets per-use state.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 *
 * UMA destructor for the thread zone; returns the thread ID to the
 * allocator and, under INVARIANTS, verifies the thread is inactive.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
14899026Sjulian */ 149132987Sgreenstatic int 150132987Sgreenthread_init(void *mem, int size, int flags) 15199026Sjulian{ 152131149Smarcel struct thread *td; 15399026Sjulian 15499026Sjulian td = (struct thread *)mem; 155131149Smarcel 156126326Sjhb td->td_sleepqueue = sleepq_alloc(); 157122514Sjhb td->td_turnstile = turnstile_alloc(); 158173631Srrs EVENTHANDLER_INVOKE(thread_init, td); 159107126Sjeff td->td_sched = (struct td_sched *)&td[1]; 160161678Sdavidxu umtx_thread_init(td); 161173361Skib td->td_kstack = 0; 162132987Sgreen return (0); 16399026Sjulian} 16499026Sjulian 16599026Sjulian/* 16699026Sjulian * Tear down type-stable parts of a thread (just before being discarded). 16799026Sjulian */ 16899026Sjulianstatic void 16999026Sjulianthread_fini(void *mem, int size) 17099026Sjulian{ 171131149Smarcel struct thread *td; 17299026Sjulian 17399026Sjulian td = (struct thread *)mem; 174173631Srrs EVENTHANDLER_INVOKE(thread_fini, td); 175122514Sjhb turnstile_free(td->td_turnstile); 176126326Sjhb sleepq_free(td->td_sleepqueue); 177161678Sdavidxu umtx_thread_fini(td); 178174647Sjeff seltdfini(td); 17999026Sjulian} 180111028Sjeff 181107126Sjeff/* 182111028Sjeff * For a newly created process, 183111028Sjeff * link up all the structures and its initial threads etc. 184134791Sjulian * called from: 185134791Sjulian * {arch}/{arch}/machdep.c ia64_init(), init386() etc. 
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	/* Reset the thread list before doing the common linkup work. */
	TAILQ_INIT(&p->p_threads);	   /* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 * Sets up the thread-ID allocator and the UMA zone with the
 * ctor/dtor/init/fini hooks defined above.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* leave one number for thread0 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{
	/* Drop the exit reference taken in thread_exit(), then zombify. */
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie resources.
 * Detaches the entire zombie list under the spin lock, then frees each
 * thread (and its credential reference) with the lock dropped.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}

/*
 * Allocate a thread.
 * Returns NULL if a kernel stack cannot be obtained.
 */
struct thread *
thread_alloc(void)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, 0)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}


/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
	/* Release CPU-set, MD state, and kernel stack(s) before the zone. */
	if (td->td_cpuset)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 *
 * Entered with both the proc lock and the proc spin lock held; never
 * returns (finishes in sched_throw()).
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif
	umtx_thread_exit(td);
	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SNGL is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					thread_unsuspend_one(p->p_singlethread);
					thread_unlock(p->p_singlethread);
				}
			}

			/* Balanced by thread_stash() after cpu_throw(). */
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Save our tick information with both the thread and proc locked */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its lock has been created.
	 * PROC_LOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_flags = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	p->p_flag &= ~P_HADTHREADS;
}

/*
 * Remove a thread from its process's thread list.
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must  NOT clear links to proc! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * There are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptable. (PCATCH).
 *
 * 'mode' is one of SINGLE_EXIT, SINGLE_BOUNDARY, or the default
 * (suspend-in-place) behavior; it controls both how other threads are
 * driven out and which threads count toward the "remaining" total.
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining, wakeup_swapper;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		wakeup_swapper = 0;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						wakeup_swapper =
						    sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (wakeup_swapper)
			kick_proc0();
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		/* If we are the last to suspend, wake the single threader. */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}

/*
 * Suspend the current thread and context switch away.
 * Entered with both the proc lock and proc spin lock held; both are
 * reacquired before returning.
 */
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL | SWT_SUSPEND, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

/*
 * Mark a (non-current) thread suspended.  Caller holds the proc spin
 * lock and the thread lock.
 */
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	td->td_flags &= ~TDF_NEEDSUSPCHK;
	TD_SET_SUSPENDED(td);
	sched_sleep(td, 0);
}

/*
 * Clear a thread's suspended state and make it runnable again.
 * Caller holds the proc spin lock and the thread lock.
 */
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	if (setrunnable(td)) {
#ifdef INVARIANTS
		panic("not waking up swapper");
#endif
	}
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
}

/*
 * End the single threading mode..
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
}

/*
 * Find a thread in a process by its thread ID.
 * Returns the matching thread, or NULL if no thread has that tid.
 * The caller must hold the process lock.
 */
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}