kern_thread.c revision 112993

/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_thread.c 112993 2003-04-02 23:53:30Z peter $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>
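/*
 * Objects managed here (a sketch of the relationships, as implied by the
 * linkage functions below): each process (proc) owns one or more ksegrps;
 * each ksegrp owns a set of KSEs (kernel schedulable entities), threads
 * and, for threaded processes, kse_upcall structures.  Threads, KSEs,
 * ksegrps and upcalls are allocated from the UMA zones declared below.
 */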
/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
        &thread_debug, 0, "thread debug");

static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
        &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
        &max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
        &max_threads_hits, 0, "");

static int virtual_cpu;

/* Byte span between two members of a struct, for bzero/bcopy of sections. */
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
        TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td, int user);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
        int error, new_val;
        int def_val;

#ifdef SMP
        def_val = mp_ncpus;
#else
        def_val = 1;
#endif
        if (virtual_cpu == 0)
                new_val = def_val;
        else
                new_val = virtual_cpu;
        error = sysctl_handle_int(oidp, &new_val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        if (new_val < 0)
                return (EINVAL);
        virtual_cpu = new_val;
        return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
        0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
        "debug virtual cpus");
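/*
 * Zone callback ordering (standard UMA behaviour, relied on below):
 * the init/fini callbacks run only when an item is first imported into
 * or finally released from a zone, while ctor/dtor run on every
 * allocation and free.  Anything set up in thread_init() is therefore
 * type-stable across thread_alloc()/thread_free() cycles.
 */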
/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;
        td->td_state = TDS_INACTIVE;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
        struct thread *td;

        td = (struct thread *)mem;

#ifdef INVARIANTS
        /* Verify that this thread is in a safe state to free. */
        switch (td->td_state) {
        case TDS_INHIBITED:
        case TDS_RUNNING:
        case TDS_CAN_RUN:
        case TDS_RUNQ:
                /*
                 * We must never unlink a thread that is in one of
                 * these states, because it is currently active.
                 */
                panic("bad state for thread unlinking");
                /* NOTREACHED */
        case TDS_INACTIVE:
                break;
        default:
                panic("bad thread state");
                /* NOTREACHED */
        }
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        mtx_lock(&Giant);
        pmap_new_thread(td, 0);
        mtx_unlock(&Giant);
        cpu_thread_setup(td);
        td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
        struct thread *td;

        td = (struct thread *)mem;
        pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
        struct kse *ke;

        ke = (struct kse *)mem;
        ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
        struct ksegrp *kg;

        kg = (struct ksegrp *)mem;
        kg->kg_sched = (struct kg_sched *)&kg[1];
}
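/*
 * Note that the `&obj[1]' idiom above works because each zone is created
 * in threadinit() with sched_sizeof_thread()/sched_sizeof_kse()/
 * sched_sizeof_ksegrp(), which (it is assumed) include room for the
 * scheduler-private data immediately after the object itself.
 */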
/*
 * Link a KSE into its ksegrp.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
        struct proc *p = kg->kg_proc;

        TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
        kg->kg_kses++;
        ke->ke_state = KES_UNQUEUED;
        ke->ke_proc = p;
        ke->ke_ksegrp = kg;
        ke->ke_thread = NULL;
        ke->ke_oncpu = NOCPU;
        ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
        struct ksegrp *kg;

        mtx_assert(&sched_lock, MA_OWNED);
        kg = ke->ke_ksegrp;
        TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
        if (ke->ke_state == KES_IDLE) {
                TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
                kg->kg_idle_kses--;
        }
        if (--kg->kg_kses == 0)
                ksegrp_unlink(kg);
        /*
         * Aggregate stats from the KSE
         */
        kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

        TAILQ_INIT(&kg->kg_threads);
        TAILQ_INIT(&kg->kg_runq);       /* links with td_runq */
        TAILQ_INIT(&kg->kg_slpq);       /* links with td_runq */
        TAILQ_INIT(&kg->kg_kseq);       /* all kses in ksegrp */
        TAILQ_INIT(&kg->kg_iq);         /* all idle kses in ksegrp */
        TAILQ_INIT(&kg->kg_upcalls);    /* all upcall structures in ksegrp */
        kg->kg_proc = p;
        /*
         * the following counters are in the -zero- section
         * and may not need clearing
         */
        kg->kg_numthreads = 0;
        kg->kg_runnable   = 0;
        kg->kg_kses       = 0;
        kg->kg_runq_kses  = 0; /* XXXKSE change name */
        kg->kg_idle_kses  = 0;
        kg->kg_numupcalls = 0;
        /* link it in now that it's consistent */
        p->p_numksegrps++;
        TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
        struct proc *p;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
        KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
        KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

        p = kg->kg_proc;
        TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
        p->p_numksegrps--;
        /*
         * Aggregate stats from the KSE
         */
        ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
        struct kse_upcall *ku;

        ku = uma_zalloc(upcall_zone, M_WAITOK);
        bzero(ku, sizeof(*ku));
        return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

        uma_zfree(upcall_zone, ku);
}
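/*
 * Upcall life cycle, as implemented below: an upcall is allocated and
 * linked onto its ksegrp, is owned by exactly one thread at a time
 * (ku_owner), and when unlinked is stashed on the zombie list for
 * thread_reap() to free.  Ownership must be dropped before unlinking,
 * as the assertion in upcall_unlink() insists.
 */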
void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

        mtx_assert(&sched_lock, MA_OWNED);
        TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
        ku->ku_ksegrp = kg;
        kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
        struct ksegrp *kg = ku->ku_ksegrp;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
        TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
        kg->kg_numupcalls--;
        upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

        if (td->td_upcall) {
                td->td_upcall->ku_owner = NULL;
                upcall_unlink(td->td_upcall);
                td->td_upcall = 0;
        }
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
            struct kse *ke, struct thread *td)
{

        TAILQ_INIT(&p->p_ksegrps);           /* all ksegrps in proc */
        TAILQ_INIT(&p->p_threads);           /* all threads in proc */
        TAILQ_INIT(&p->p_suspended);         /* Threads suspended */
        p->p_numksegrps = 0;
        p->p_numthreads = 0;

        ksegrp_link(kg, p);
        kse_link(ke, kg);
        thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
        struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
        struct proc *p;
        struct thread *td2;

        p = td->td_proc;
        if (!(p->p_flag & P_THREADED) || (uap->tmbx == NULL))
                return (EINVAL);
        mtx_lock_spin(&sched_lock);
        FOREACH_THREAD_IN_PROC(p, td2) {
                if (td2->td_mailbox == uap->tmbx) {
                        td2->td_flags |= TDF_INTERRUPT;
                        if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
                                if (td2->td_flags & TDF_CVWAITQ)
                                        cv_abort(td2);
                                else
                                        abortsleep(td2);
                        }
                        mtx_unlock_spin(&sched_lock);
                        return (0);
                }
        }
        mtx_unlock_spin(&sched_lock);
        return (ESRCH);
}
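/*
 * kse_exit() below is how an upcall (UTS) thread retires itself.  Two
 * outcomes are possible: if this is the last thread of the process the
 * whole process leaves threaded mode, whereas the last upcall of a
 * still-multithreaded group may not exit (EDEADLK) because the
 * remaining threads would have no way to report completion.
 */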
/*
struct kse_exit_args {
        register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse *ke;

        p = td->td_proc;
        /*
         * Only the UTS may call this syscall, and the current group
         * must be a threaded group.
         */
        if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
                return (EINVAL);
        KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));

        kg = td->td_ksegrp;
        /* Serialize removing upcall */
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (EDEADLK);
        }
        ke = td->td_kse;
        upcall_remove(td);
        if (p->p_numthreads == 1) {
                kse_purge(p, td);
                p->p_flag &= ~P_THREADED;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
        } else {
                if (kg->kg_numthreads == 1) { /* Shutdown a group */
                        kse_purge_group(td);
                        ke->ke_flags |= KEF_EXIT;
                }
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }
        return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
        struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct timespec ts, ts2, ts3, timeout;
        struct timeval tv;
        int error;

        p = td->td_proc;
        kg = td->td_ksegrp;
        /*
         * Only the UTS may call this syscall, and the current group
         * must be a threaded group.
         */
        if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
                return (EINVAL);
        KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));
        if (uap->timeout != NULL) {
                if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
                        return (error);
                getnanouptime(&ts);
                timespecadd(&ts, &timeout);
                TIMESPEC_TO_TIMEVAL(&tv, &timeout);
        }
        mtx_lock_spin(&sched_lock);
        /* Change OURSELF to become an upcall. */
        td->td_flags = TDF_UPCALLING;
#if 0 /* XXX This shouldn't be necessary */
        if (p->p_sflag & PS_NEEDSIGCHK)
                td->td_flags |= TDF_ASTPENDING;
#endif
        mtx_unlock_spin(&sched_lock);
        PROC_LOCK(p);
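        /*
         * Sleep until the UTS has something to pick up: either a completed
         * context list or an explicit KUF_DOUPCALL request.  When a timeout
         * was supplied, `ts' holds the absolute deadline; after every
         * EWOULDBLOCK wakeup the remaining time is recomputed, so early
         * wakeups re-arm the sleep rather than extend it.
         */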
        while ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
               (kg->kg_completed == NULL)) {
                kg->kg_upsleeps++;
                error = msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH,
                    "kse_rel", (uap->timeout ? tvtohz(&tv) : 0));
                kg->kg_upsleeps--;
                PROC_UNLOCK(p);
                if (uap->timeout == NULL || error != EWOULDBLOCK)
                        return (0);
                getnanouptime(&ts2);
                if (timespeccmp(&ts2, &ts, >=))
                        return (0);
                ts3 = ts;
                timespecsub(&ts3, &ts2);
                TIMESPEC_TO_TIMEVAL(&tv, &ts3);
                PROC_LOCK(p);
        }
        PROC_UNLOCK(p);
        return (0);
}

/* struct kse_wakeup_args {
        struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
        struct proc *p;
        struct ksegrp *kg;
        struct kse_upcall *ku;
        struct thread *td2;

        p = td->td_proc;
        td2 = NULL;
        ku = NULL;
        /* KSE-enabled processes only, please. */
        if (!(p->p_flag & P_THREADED))
                return (EINVAL);
        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        if (uap->mbx) {
                FOREACH_KSEGRP_IN_PROC(p, kg) {
                        FOREACH_UPCALL_IN_GROUP(kg, ku) {
                                if (ku->ku_mailbox == uap->mbx)
                                        break;
                        }
                        if (ku)
                                break;
                }
        } else {
                kg = td->td_ksegrp;
                if (kg->kg_upsleeps) {
                        wakeup_one(&kg->kg_completed);
                        mtx_unlock_spin(&sched_lock);
                        PROC_UNLOCK(p);
                        return (0);
                }
                ku = TAILQ_FIRST(&kg->kg_upcalls);
        }
        if (ku) {
                if ((td2 = ku->ku_owner) == NULL) {
                        panic("%s: no owner", __func__);
                } else if (TD_ON_SLEEPQ(td2) &&
                           (td2->td_wchan == &kg->kg_completed)) {
                        abortsleep(td2);
                } else {
                        ku->ku_flags |= KUF_DOUPCALL;
                }
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);
                return (0);
        }
        mtx_unlock_spin(&sched_lock);
        PROC_UNLOCK(p);
        return (ESRCH);
}

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
        struct kse_mailbox *mbx;
        int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
        struct kse *newke;
        struct ksegrp *newkg;
        struct ksegrp *kg;
        struct proc *p;
        struct kse_mailbox mbx;
        struct kse_upcall *newku;
        int err, ncpus;

        p = td->td_proc;
        if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
                return (err);
        /* Too bad the kernel doesn't always have a cpu counter available. */
#ifdef SMP
        ncpus = mp_ncpus;
#else
        ncpus = 1;
#endif
        if (thread_debug && virtual_cpu != 0)
                ncpus = virtual_cpu;

        /* Easier to just set it than to test and set */
        PROC_LOCK(p);
        p->p_flag |= P_THREADED;
        PROC_UNLOCK(p);
        kg = td->td_ksegrp;
        if (uap->newgroup) {
                /* Have a race condition here, but it is cheap */
                if (p->p_numksegrps >= max_groups_per_proc)
                        return (EPROCLIM);
                /*
                 * If we want a new KSEGRP it doesn't matter whether
                 * we have already fired up KSE mode before or not.
                 * We put the process in KSE mode and create a new KSEGRP.
                 */
                newkg = ksegrp_alloc();
                bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
                      kg_startzero, kg_endzero));
                bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
                      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
                mtx_lock_spin(&sched_lock);
                if (p->p_numksegrps >= max_groups_per_proc) {
                        mtx_unlock_spin(&sched_lock);
                        ksegrp_free(newkg);
                        return (EPROCLIM);
                }
                ksegrp_link(newkg, p);
                mtx_unlock_spin(&sched_lock);
        } else {
                newkg = kg;
        }

        /*
         * Creating more upcalls than there are physical cpus does
         * not help performance.
         */
        if (newkg->kg_numupcalls >= ncpus)
                return (EPROCLIM);

        if (newkg->kg_numupcalls == 0) {
                /*
                 * Initialize the KSE group, optimized for MP.
                 * Create as many KSEs as there are physical cpus; this
                 * increases concurrency in the kernel even if userland
                 * is not MP safe and can only run on a single CPU (true
                 * for early versions of libpthread).
                 * In an ideal world, every physical cpu should execute
                 * a thread.  If there are enough KSEs, threads in the
                 * kernel can be executed in parallel on different cpus
                 * at full speed; concurrency in the kernel shouldn't be
                 * restricted by the number of upcalls userland provides.
                 * Adding more upcall structures only increases
                 * concurrency in userland.
                 * The highest performance configuration is:
                 * N kses = N upcalls = N physical cpus
                 */
                while (newkg->kg_kses < ncpus) {
                        newke = kse_alloc();
                        bzero(&newke->ke_startzero, RANGEOF(struct kse,
                              ke_startzero, ke_endzero));
#if 0
                        mtx_lock_spin(&sched_lock);
                        bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
                              RANGEOF(struct kse, ke_startcopy, ke_endcopy));
                        mtx_unlock_spin(&sched_lock);
#endif
                        mtx_lock_spin(&sched_lock);
                        kse_link(newke, newkg);
                        /* Add engine */
                        kse_reassign(newke);
                        mtx_unlock_spin(&sched_lock);
                }
        }
        newku = upcall_alloc();
        newku->ku_mailbox = uap->mbx;
        newku->ku_func = mbx.km_func;
        bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

        /* For the first call this may not have been set */
        if (td->td_standin == NULL)
                thread_alloc_spare(td, NULL);

        mtx_lock_spin(&sched_lock);
        if (newkg->kg_numupcalls >= ncpus) {
                mtx_unlock_spin(&sched_lock);
                upcall_free(newku);
                return (EPROCLIM);
        }
        upcall_link(newku, newkg);
        if (mbx.km_quantum)
                newkg->kg_upquantum = max(1, mbx.km_quantum/tick);

        /*
         * Each upcall structure has an owner thread, find which
         * one owns it.
         */
        if (uap->newgroup) {
                /*
                 * Because the new ksegrp has no thread yet,
                 * create an initial upcall thread to own it.
                 */
                thread_schedule_upcall(td, newku);
        } else {
                /*
                 * If the current thread has no upcall structure,
                 * just assign the upcall to it.
                 */
                if (td->td_upcall == NULL) {
                        newku->ku_owner = td;
                        td->td_upcall = newku;
                } else {
                        /*
                         * Create a new upcall thread to own it.
                         */
                        thread_schedule_upcall(td, newku);
                }
        }
        mtx_unlock_spin(&sched_lock);
        return (0);
}
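/*
 * Userland-side usage (a sketch only; uts_entry and uts_stack are
 * placeholders, and the mailbox layout lives in <sys/kse.h>, not here):
 *
 *      struct kse_mailbox mbx;
 *      bzero(&mbx, sizeof(mbx));
 *      mbx.km_func = uts_entry;        upcall entry point
 *      mbx.km_stack = uts_stack;       stack_t used for upcalls
 *      mbx.km_quantum = 20000;         periodic upcall, in microseconds
 *      kse_create(&mbx, 0);            one upcall on the current group
 *
 * kse_release() then parks an upcall until there is work to report,
 * and kse_wakeup() unparks it.
 */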
/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
        get_mcontext(td, &uc->uc_mcontext);
#endif
        uc->uc_sigmask = td->td_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
        int ret;

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
        ret = set_mcontext(td, &uc->uc_mcontext);
#else
        ret = ENOSYS;
#endif
        if (ret == 0) {
                SIG_CANTMASK(uc->uc_sigmask);
                PROC_LOCK(td->td_proc);
                td->td_sigmask = uc->uc_sigmask;
                PROC_UNLOCK(td->td_proc);
        }
        return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

#ifndef __ia64__
        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, 0);
#else
        /*
         * XXX the ia64 kstack allocator is really lame and is at the mercy
         * of contigmalloc().  This hackery is to pre-construct a whole
         * pile of thread structures with associated kernel stacks early
         * in the system startup while contigmalloc() still works. Once we
         * have them, keep them.  Sigh.
         */
        thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
            thread_ctor, thread_dtor, thread_init, thread_fini,
            UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
        uma_prealloc(thread_zone, 512);         /* XXX arbitrary */
#endif
        ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
            NULL, NULL, ksegrp_init, NULL,
            UMA_ALIGN_CACHE, 0);
        kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
            NULL, NULL, kse_init, NULL,
            UMA_ALIGN_CACHE, 0);
        upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
            NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
        mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
        mtx_lock_spin(&kse_zombie_lock);
        TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
        mtx_unlock_spin(&kse_zombie_lock);
}
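/*
 * The zombie queues exist because an exiting context cannot free its
 * own resources: a thread cannot release the stack it is still running
 * on.  Structures are therefore stashed above and reclaimed lazily by
 * the next caller of thread_reap() or thread_alloc().
 */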
/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
        struct thread *td_first, *td_next;
        struct kse *ke_first, *ke_next;
        struct ksegrp *kg_first, *kg_next;
        struct kse_upcall *ku_first, *ku_next;

        /*
         * Don't even bother to lock if none at this instant,
         * we really don't care about the next instant..
         */
        if ((!TAILQ_EMPTY(&zombie_threads))
            || (!TAILQ_EMPTY(&zombie_kses))
            || (!TAILQ_EMPTY(&zombie_ksegrps))
            || (!TAILQ_EMPTY(&zombie_upcalls))) {
                mtx_lock_spin(&kse_zombie_lock);
                td_first = TAILQ_FIRST(&zombie_threads);
                ke_first = TAILQ_FIRST(&zombie_kses);
                kg_first = TAILQ_FIRST(&zombie_ksegrps);
                ku_first = TAILQ_FIRST(&zombie_upcalls);
                if (td_first)
                        TAILQ_INIT(&zombie_threads);
                if (ke_first)
                        TAILQ_INIT(&zombie_kses);
                if (kg_first)
                        TAILQ_INIT(&zombie_ksegrps);
                if (ku_first)
                        TAILQ_INIT(&zombie_upcalls);
                mtx_unlock_spin(&kse_zombie_lock);
                while (td_first) {
                        td_next = TAILQ_NEXT(td_first, td_runq);
                        if (td_first->td_ucred)
                                crfree(td_first->td_ucred);
                        thread_free(td_first);
                        td_first = td_next;
                }
                while (ke_first) {
                        ke_next = TAILQ_NEXT(ke_first, ke_procq);
                        kse_free(ke_first);
                        ke_first = ke_next;
                }
                while (kg_first) {
                        kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
                        ksegrp_free(kg_first);
                        kg_first = kg_next;
                }
                while (ku_first) {
                        ku_next = TAILQ_NEXT(ku_first, ku_link);
                        upcall_free(ku_first);
                        ku_first = ku_next;
                }
        }
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
        return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
        return (uma_zalloc(kse_zone, M_WAITOK));
}
/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
        thread_reap();                  /* check if any zombies to get */
        return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *td)
{
        uma_zfree(ksegrp_zone, td);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *td)
{
        uma_zfree(kse_zone, td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

        cpu_thread_clean(td);
        uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
        struct proc *p;
        struct ksegrp *kg;
        uintptr_t mbx;
        void *addr;
        int error, temp;
        ucontext_t uc;

        p = td->td_proc;
        kg = td->td_ksegrp;

        /* Export the user/machine context. */
        addr = (void *)(&td->td_mailbox->tm_context);
        error = copyin(addr, &uc, sizeof(ucontext_t));
        if (error)
                goto bad;

        thread_getcontext(td, &uc);
        error = copyout(&uc, addr, sizeof(ucontext_t));
        if (error)
                goto bad;

        /* Export clock ticks spent in kernel mode. */
        addr = (caddr_t)(&td->td_mailbox->tm_sticks);
        temp = fuword(addr) + td->td_usticks;
        if (suword(addr, temp))
                goto bad;

        /* Get the address, in the latest mailbox, of the list pointer. */
        addr = (void *)(&td->td_mailbox->tm_next);
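        /*
         * The completed-context list lives in user memory, so it cannot
         * be protected by kernel locks while we write it with suword().
         * Instead, optimistically store the current list head into the
         * new mailbox, then re-check under the proc lock that the head
         * has not moved before publishing; retry if it has.
         */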
        /*
         * Put the saved address of the previous first
         * entry into this one
         */
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        error = EFAULT;
                        goto bad;
                }
                PROC_LOCK(p);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = td->td_mailbox;
                        /*
                         * The thread context may be taken away by
                         * other upcall threads when we unlock the
                         * process lock, so it's no longer valid to
                         * use it again in any other places.
                         */
                        td->td_mailbox = NULL;
                        PROC_UNLOCK(p);
                        break;
                }
                PROC_UNLOCK(p);
        }
        td->td_usticks = 0;
        return (0);

bad:
        PROC_LOCK(p);
        psignal(p, SIGSEGV);
        PROC_UNLOCK(p);
        /* The mailbox is bad, don't use it */
        td->td_mailbox = NULL;
        td->td_usticks = 0;
        return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
        struct proc *p = kg->kg_proc;
        void *addr;
        uintptr_t mbx;

        addr = (void *)(&ku->ku_mailbox->km_completed);
        for (;;) {
                mbx = (uintptr_t)kg->kg_completed;
                if (suword(addr, mbx)) {
                        PROC_LOCK(p);
                        psignal(p, SIGSEGV);
                        PROC_UNLOCK(p);
                        return (EFAULT);
                }
                PROC_LOCK(p);
                if (mbx == (uintptr_t)kg->kg_completed) {
                        kg->kg_completed = NULL;
                        PROC_UNLOCK(p);
                        break;
                }
                PROC_UNLOCK(p);
        }
        return (0);
}
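/*
 * Tick accounting for threaded processes, as used below: statclock hits
 * are counted per-thread (td_uuticks for ticks taken in user mode,
 * td_usticks for those taken in the kernel) and are later folded into
 * the tm_uticks/tm_sticks fields of the thread's user-visible mailbox
 * by thread_update_usr_ticks().
 */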
/*
 * This function should be called at statclock interrupt time
 */
int
thread_statclock(int user)
{
        struct thread *td = curthread;

        if (td->td_ksegrp->kg_numupcalls == 0)
                return (-1);
        if (user) {
                /* Currently always done via ast() */
                mtx_lock_spin(&sched_lock);
                td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
                mtx_unlock_spin(&sched_lock);
                td->td_uuticks++;
        } else {
                if (td->td_mailbox != NULL)
                        td->td_usticks++;
                else {
                        /* XXXKSE
                         * We will call thread_user_enter() for every
                         * kernel entry in future, so if the thread mailbox
                         * is NULL, it must be a UTS kernel; don't account
                         * clock ticks for it.
                         */
                }
        }
        return (0);
}

/*
 * Export statclock ticks for userland
 */
static int
thread_update_usr_ticks(struct thread *td, int user)
{
        struct proc *p = td->td_proc;
        struct kse_thr_mailbox *tmbx;
        struct kse_upcall *ku;
        struct ksegrp *kg;
        caddr_t addr;
        uint uticks;

        if ((ku = td->td_upcall) == NULL)
                return (-1);

        tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
        if ((tmbx == NULL) || (tmbx == (void *)-1))
                return (-1);
        if (user) {
                uticks = td->td_uuticks;
                td->td_uuticks = 0;
                addr = (caddr_t)&tmbx->tm_uticks;
        } else {
                uticks = td->td_usticks;
                td->td_usticks = 0;
                addr = (caddr_t)&tmbx->tm_sticks;
        }
        if (uticks) {
                if (suword(addr, uticks+fuword(addr))) {
                        PROC_LOCK(p);
                        psignal(p, SIGSEGV);
                        PROC_UNLOCK(p);
                        return (-2);
                }
        }
        kg = td->td_ksegrp;
        if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) {
                mtx_lock_spin(&sched_lock);
                td->td_upcall->ku_flags |= KUF_DOUPCALL;
                mtx_unlock_spin(&sched_lock);
        }
        return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
        struct thread *td;
        struct kse *ke;
        struct proc *p;
        struct ksegrp *kg;

        td = curthread;
        kg = td->td_ksegrp;
        p = td->td_proc;
        ke = td->td_kse;

        mtx_assert(&sched_lock, MA_OWNED);
        KASSERT(p != NULL, ("thread exiting without a process"));
        KASSERT(ke != NULL, ("thread exiting without a kse"));
        KASSERT(kg != NULL, ("thread exiting without a kse group"));
        PROC_LOCK_ASSERT(p, MA_OWNED);
        CTR1(KTR_PROC, "thread_exit: thread %p", td);
        KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

        if (td->td_standin != NULL) {
                thread_stash(td->td_standin);
                td->td_standin = NULL;
        }

        cpu_thread_exit(td);    /* XXXSMP */

        /*
         * The last thread is left attached to the process
         * so that the whole bundle gets recycled. Skip
         * all this stuff.
         */
        if (p->p_numthreads > 1) {
                /*
                 * Unlink this thread from its proc and the kseg.
                 * In keeping with the other structs we probably should
                 * have a thread_unlink() that does some of this but it
                 * would only be called from here (I think) so it would
                 * be a waste. (might be useful for proc_fini() as well.)
                 */
                TAILQ_REMOVE(&p->p_threads, td, td_plist);
                p->p_numthreads--;
                TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
                kg->kg_numthreads--;
                if (p->p_maxthrwaits)
                        wakeup(&p->p_numthreads);
                /*
                 * The test below is NOT true if we are the
                 * sole exiting thread. P_STOPPED_SNGL is unset
                 * in exit1() after it is the only survivor.
                 */
                if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
                        if (p->p_numthreads == p->p_suspcount) {
                                thread_unsuspend_one(p->p_singlethread);
                        }
                }

                /*
                 * Because each upcall structure has an owner thread,
                 * and an owner thread exits only when the process is
                 * exiting, upcalls to userland are no longer needed
                 * and deleting the upcall structure is safe here.
                 * So when all threads in a group have exited, all
                 * upcalls in the group should be automatically freed.
                 */
                if (td->td_upcall)
                        upcall_remove(td);

                ke->ke_state = KES_UNQUEUED;
                ke->ke_thread = NULL;
                /*
                 * Decide what to do with the KSE attached to this thread.
                 */
                if (ke->ke_flags & KEF_EXIT)
                        kse_unlink(ke);
                else
                        kse_reassign(ke);
                PROC_UNLOCK(p);
                td->td_kse = NULL;
                td->td_state = TDS_INACTIVE;
                td->td_proc = NULL;
                td->td_ksegrp = NULL;
                td->td_last_kse = NULL;
                PCPU_SET(deadthread, td);
        } else {
                PROC_UNLOCK(p);
        }
        /* XXX Shouldn't cpu_throw() here. */
        mtx_assert(&sched_lock, MA_OWNED);
#if defined(__i386__) || defined(__sparc64__)
        cpu_throw(td, choosethread());
#else
        cpu_throw();
#endif
        panic("I'm a teapot!");
        /* NOTREACHED */
}
/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
        struct thread *td;

        KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
        KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
        FOREACH_THREAD_IN_PROC(p, td) {
                if (td->td_standin != NULL) {
                        thread_free(td->td_standin);
                        td->td_standin = NULL;
                }
                cpu_thread_clean(td);
        }
        thread_reap();  /* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
        struct proc *p;

        p = kg->kg_proc;
        td->td_state = TDS_INACTIVE;
        td->td_proc = p;
        td->td_ksegrp = kg;
        td->td_last_kse = NULL;
        td->td_flags = 0;
        td->td_kse = NULL;

        LIST_INIT(&td->td_contested);
        callout_init(&td->td_slpcallout, 1);
        TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
        TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
        p->p_numthreads++;
        kg->kg_numthreads++;
}
/*
 * Purge a ksegrp's resources. When a ksegrp is preparing to
 * exit, it calls this function.
 */
void
kse_purge_group(struct thread *td)
{
        struct ksegrp *kg;
        struct kse *ke;

        kg = td->td_ksegrp;
        KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
        while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
                KASSERT(ke->ke_state == KES_IDLE,
                    ("%s: wrong idle KSE state", __func__));
                kse_unlink(ke);
        }
        KASSERT((kg->kg_kses == 1),
            ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
        KASSERT((kg->kg_numupcalls == 0),
            ("%s: ksegrp still has %d upcall datas",
            __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources. When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
void
kse_purge(struct proc *p, struct thread *td)
{
        struct ksegrp *kg;
        struct kse *ke;

        KASSERT(p->p_numthreads == 1, ("bad thread number"));
        mtx_lock_spin(&sched_lock);
        while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
                TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
                p->p_numksegrps--;
                /*
                 * KSEs have no owners; after all the threads in the
                 * group have exited, some KSEs may have been left on
                 * the idle queue, so gc them now.
                 */
                while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
                        KASSERT(ke->ke_state == KES_IDLE,
                            ("%s: wrong idle KSE state", __func__));
                        TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
                        kg->kg_idle_kses--;
                        TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
                        kg->kg_kses--;
                        kse_stash(ke);
                }
                KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
                        ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
                        ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
                KASSERT((kg->kg_numupcalls == 0),
                        ("%s: ksegrp still has %d upcall datas",
                        __func__, kg->kg_numupcalls));

                if (kg != td->td_ksegrp)
                        ksegrp_stash(kg);
        }
        TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
        p->p_numksegrps++;
        mtx_unlock_spin(&sched_lock);
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
        if (td->td_standin)
                return;
        if (spare == NULL)
                spare = thread_alloc();
        td->td_standin = spare;
        bzero(&spare->td_startzero,
            (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
        spare->td_proc = td->td_proc;
        /* Setup PCB and fork address */
        cpu_set_upcall(spare, td->td_pcb);
        /*
         * XXXKSE do we really need this? (default values for the
         * frame).
         */
        bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
        spare->td_ucred = crhold(td->td_ucred);
}
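/*
 * The `standin' pre-allocated above matters because upcalls are
 * scheduled from places where sleeping for memory is not allowed
 * (sched_lock held); thread_schedule_upcall() therefore consumes the
 * spare instead of allocating, and panics if none was set up.
 */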
/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
        struct thread *td2;

        mtx_assert(&sched_lock, MA_OWNED);

        /*
         * Schedule an upcall thread on the specified kse_upcall;
         * the kse_upcall must be free.
         * td must have a spare thread.
         */
        KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
        if ((td2 = td->td_standin) != NULL) {
                td->td_standin = NULL;
        } else {
                panic("no reserve thread when scheduling an upcall");
                return (NULL);
        }
        CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
             td2, td->td_proc->p_pid, td->td_proc->p_comm);
        bcopy(&td->td_startcopy, &td2->td_startcopy,
              (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
        thread_link(td2, ku->ku_ksegrp);
        /* Let the new thread become owner of the upcall */
        ku->ku_owner   = td2;
        td2->td_upcall = ku;
        td2->td_flags  = TDF_UPCALLING;
#if 0 /* XXX This shouldn't be necessary */
        if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
                td2->td_flags |= TDF_ASTPENDING;
#endif
        td2->td_kse   = NULL;
        td2->td_state = TDS_CAN_RUN;
        td2->td_inhibitors = 0;
        setrunqueue(td2);
        return (td2);   /* bogus.. should be a void function */
}

void
thread_signal_add(struct thread *td, int sig)
{
        struct kse_upcall *ku;
        struct proc *p;
        sigset_t ss;
        int error;

        PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
        td = curthread;
        ku = td->td_upcall;
        p = td->td_proc;

        PROC_UNLOCK(p);
        error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
        if (error)
                goto error;

        SIGADDSET(ss, sig);

        error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
        if (error)
                goto error;

        PROC_LOCK(p);
        return;
error:
        PROC_LOCK(p);
        sigexit(td, SIGILL);
}

/*
 * Schedule an upcall to notify a KSE process that it received signals.
 */
void
thread_signal_upcall(struct thread *td)
{
        mtx_lock_spin(&sched_lock);
        td->td_flags |= TDF_UPCALLING;
        mtx_unlock_spin(&sched_lock);

        return;
}
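/*
 * thread_switchout() below runs at context-switch time with sched_lock
 * held: if the thread being switched out still owns an upcall and is
 * headed for a sleep queue, the upcall is handed over to a freshly
 * scheduled upcall thread, so the UTS learns that this context has
 * blocked.
 */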
1513112397Sdavidxu * XXXKSE eventually almost any inhibition could do.
1514112397Sdavidxu */
1515112397Sdavidxu if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) {
1516112397Sdavidxu /*
1517112397Sdavidxu * Release ownership of the upcall and schedule an
1518112397Sdavidxu * upcall thread; the new upcall thread becomes the
1519112397Sdavidxu * owner of the upcall structure.
1520112397Sdavidxu */
1521112397Sdavidxu ku = td->td_upcall;
1522112397Sdavidxu ku->ku_owner = NULL;
1523112397Sdavidxu td->td_upcall = NULL;
1524112397Sdavidxu td->td_flags &= ~TDF_CAN_UNBIND;
1525112397Sdavidxu thread_schedule_upcall(td, ku);
1526112397Sdavidxu }
1527112397Sdavidxu}
1528112397Sdavidxu
1529103410Smini/*
1530111028Sjeff * Setup done on the thread when it enters the kernel.
1531105900Sjulian * XXXKSE Presently only for syscalls but eventually all kernel entries.
1532105900Sjulian */
1533105900Sjulianvoid
1534105900Sjulianthread_user_enter(struct proc *p, struct thread *td)
1535105900Sjulian{
1536111028Sjeff struct ksegrp *kg;
1537111028Sjeff struct kse_upcall *ku;
1538105900Sjulian
1539111028Sjeff kg = td->td_ksegrp;
1540105900Sjulian /*
1541105900Sjulian * First check that we shouldn't just abort.
1542105900Sjulian * But check if we are the single thread first!
1543105900Sjulian * XXX p_singlethread not locked, but should be safe.
1544105900Sjulian */
1545111028Sjeff if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1546105900Sjulian PROC_LOCK(p);
1547105900Sjulian mtx_lock_spin(&sched_lock);
1548112071Sdavidxu thread_stopped(p);
1549105900Sjulian thread_exit();
1550105900Sjulian /* NOTREACHED */
1551105900Sjulian }
1552105900Sjulian
1553105900Sjulian /*
1554105900Sjulian * If we are doing a syscall in a KSE environment,
1555105900Sjulian * note where our mailbox is. There is always the
1556108338Sjulian * possibility that we could do this lazily (in kse_reassign()),
1557105900Sjulian * but for now do it every time.
1558105900Sjulian */
1559111028Sjeff kg = td->td_ksegrp;
1560111028Sjeff if (kg->kg_numupcalls) {
1561111028Sjeff ku = td->td_upcall;
1562111028Sjeff KASSERT(ku, ("%s: no upcall owned", __func__));
1563111028Sjeff KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
1564105900Sjulian td->td_mailbox =
1565111028Sjeff (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
1566105900Sjulian if ((td->td_mailbox == NULL) ||
1567107034Sdavidxu (td->td_mailbox == (void *)-1)) {
1568111028Sjeff /* Don't schedule an upcall when blocked. */
1569111028Sjeff td->td_mailbox = NULL;
1570107034Sdavidxu mtx_lock_spin(&sched_lock);
1571111028Sjeff td->td_flags &= ~TDF_CAN_UNBIND;
1572107034Sdavidxu mtx_unlock_spin(&sched_lock);
1573105900Sjulian } else {
1574111115Sdavidxu if (td->td_standin == NULL)
1575111115Sdavidxu thread_alloc_spare(td, NULL);
1576111115Sdavidxu mtx_lock_spin(&sched_lock);
1577111115Sdavidxu td->td_flags |= TDF_CAN_UNBIND;
1578111115Sdavidxu mtx_unlock_spin(&sched_lock);
1579105900Sjulian }
1580105900Sjulian }
1581105900Sjulian}
1582105900Sjulian
1583105900Sjulian/*
1584103410Smini * The extra work we go through if we are a threaded process when we
1585103410Smini * return to userland.
1586103410Smini *
158799026Sjulian * If we are a KSE process and returning to user mode, check for
158899026Sjulian * extra work to do before we return (e.g. for more syscalls
158999026Sjulian * to complete first). If we were in a critical section, we should
159099026Sjulian * just return to let it finish.
1591103410Smini * Same if we were in the UTS (in which case the mailbox's context's
1592103410Smini * busy indicator will be set).  The only traps we support will have
1593103410Smini * set the mailbox, and we will clear it here.
159499026Sjulian */
159599026Sjulianint
1596103838Sjulianthread_userret(struct thread *td, struct trapframe *frame)
159799026Sjulian{
1598111115Sdavidxu int error = 0, upcalls;
1599111028Sjeff struct kse_upcall *ku;
1600111115Sdavidxu struct ksegrp *kg, *kg2;
1601104695Sjulian struct proc *p;
1602107060Sdavidxu struct timespec ts;
160399026Sjulian
1604111028Sjeff p = td->td_proc;
1605110190Sjulian kg = td->td_ksegrp;
1606104695Sjulian
1607112888Sjeff
1608111028Sjeff /* Nothing to do for a non-threaded group/process. */
1609111028Sjeff if (td->td_ksegrp->kg_numupcalls == 0)
1610111028Sjeff return (0);
1611108338Sjulian
1612103410Smini /*
1613111028Sjeff * A stat clock interrupt hit while we were in userland;
1614111028Sjeff * we are returning from that interrupt, so charge the
1615111028Sjeff * thread's userland time to the UTS.
1616103410Smini */
1617111028Sjeff if (td->td_flags & TDF_USTATCLOCK) {
1618111515Sdavidxu thread_update_usr_ticks(td, 1);
1619111028Sjeff mtx_lock_spin(&sched_lock);
1620111028Sjeff td->td_flags &= ~TDF_USTATCLOCK;
1621111028Sjeff mtx_unlock_spin(&sched_lock);
1622111515Sdavidxu if (kg->kg_completed ||
1623111515Sdavidxu (td->td_upcall->ku_flags & KUF_DOUPCALL))
1624111515Sdavidxu thread_user_enter(p, td);
1625111028Sjeff }
1626108338Sjulian
1627111028Sjeff /*
1628111028Sjeff * Optimisation:
1629111028Sjeff * This thread has not started any upcall.
1630111028Sjeff * If there is no work to report other than ourselves,
1631111028Sjeff * then it can return directly to userland.
1632111028Sjeff */
1633108338Sjulian if (TD_CAN_UNBIND(td)) {
1634111028Sjeff mtx_lock_spin(&sched_lock);
1635111028Sjeff td->td_flags &= ~TDF_CAN_UNBIND;
1636112222Sdavidxu ku = td->td_upcall;
1637112888Sjeff if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
1638112077Sdavidxu (kg->kg_completed == NULL) &&
1639112397Sdavidxu (ku->ku_flags & KUF_DOUPCALL) == 0 &&
1640112397Sdavidxu (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
1641112888Sjeff mtx_unlock_spin(&sched_lock);
1642111515Sdavidxu thread_update_usr_ticks(td, 0);
1643112222Sdavidxu nanotime(&ts);
1644112397Sdavidxu error = copyout(&ts,
1645112222Sdavidxu (caddr_t)&ku->ku_mailbox->km_timeofday,
1646112222Sdavidxu sizeof(ts));
1647112077Sdavidxu td->td_mailbox = 0;
1648112222Sdavidxu if (error)
1649112222Sdavidxu goto out;
1650112077Sdavidxu return (0);
1651108338Sjulian }
1652112888Sjeff mtx_unlock_spin(&sched_lock);
1653104695Sjulian error = thread_export_context(td);
1654104695Sjulian if (error) {
1655104695Sjulian /*
1656111028Sjeff * Failing to do the KSE operation just defaults
1657104695Sjulian * back to synchronous operation, so just return from
1658108338Sjulian * the syscall.
1659104695Sjulian */
1660111028Sjeff return (0);
1661104695Sjulian }
1662104695Sjulian /*
1663111028Sjeff * There is something to report, and we own an upcall
1664111028Sjeff * structure, so we can go to userland.
1665111028Sjeff * Turn ourselves into an upcall thread.
1666104695Sjulian */
1667111028Sjeff mtx_lock_spin(&sched_lock);
1668104695Sjulian td->td_flags |= TDF_UPCALLING;
1669108338Sjulian mtx_unlock_spin(&sched_lock);
1670111028Sjeff } else if (td->td_mailbox) {
1671108338Sjulian error = thread_export_context(td);
1672112071Sdavidxu /* possibly upcall with error? */
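 /*
  * (Illustrative summary, not part of the original source:
  *  thread_userret() resolves to one of three outcomes --
  *	1. the fast path above: nothing to report and the upcall
  *	   quantum has not expired, so stamp km_timeofday and return
  *	   straight to userland;
  *	2. the TD_CAN_UNBIND path: there is work to report, so export
  *	   our context and become the upcall thread (TDF_UPCALLING);
  *	3. this path: a bound thread with a mailbox exports its
  *	   context, wakes a sleeping upcall server, and exits.)
  */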
1673112071Sdavidxu PROC_LOCK(p);
1674112071Sdavidxu /*
1675112071Sdavidxu * There are upcall threads waiting for
1676112071Sdavidxu * work to do, wake one of them up.
1677112071Sdavidxu * XXXKSE Maybe wake all of them up.
1678112071Sdavidxu */
1679112071Sdavidxu if (!error && kg->kg_upsleeps)
1680112071Sdavidxu wakeup_one(&kg->kg_completed);
1681112071Sdavidxu mtx_lock_spin(&sched_lock);
1682112071Sdavidxu thread_stopped(p);
1683108338Sjulian thread_exit();
1684111028Sjeff /* NOTREACHED */
1685104695Sjulian }
1686104695Sjulian
1687111154Sdavidxu KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1688111154Sdavidxu
1689111154Sdavidxu if (p->p_numthreads > max_threads_per_proc) {
1690111154Sdavidxu max_threads_hits++;
1691111154Sdavidxu PROC_LOCK(p);
1692111154Sdavidxu while (p->p_numthreads > max_threads_per_proc) {
1693111154Sdavidxu if (P_SHOULDSTOP(p))
1694111154Sdavidxu break;
1695111154Sdavidxu upcalls = 0;
1696111154Sdavidxu mtx_lock_spin(&sched_lock);
1697111154Sdavidxu FOREACH_KSEGRP_IN_PROC(p, kg2) {
1698111154Sdavidxu if (kg2->kg_numupcalls == 0)
1699111154Sdavidxu upcalls++;
1700111154Sdavidxu else
1701111154Sdavidxu upcalls += kg2->kg_numupcalls;
1702111154Sdavidxu }
1703111154Sdavidxu mtx_unlock_spin(&sched_lock);
1704111154Sdavidxu if (upcalls >= max_threads_per_proc)
1705111154Sdavidxu break;
1706111154Sdavidxu p->p_maxthrwaits++;
1707111154Sdavidxu msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1708111154Sdavidxu "maxthreads", NULL);
1709111154Sdavidxu p->p_maxthrwaits--;
1710111154Sdavidxu }
1711111154Sdavidxu PROC_UNLOCK(p);
1712111154Sdavidxu }
1713111154Sdavidxu
1714108338Sjulian if (td->td_flags & TDF_UPCALLING) {
1715112397Sdavidxu kg->kg_nextupcall = ticks+kg->kg_upquantum;
1716111028Sjeff ku = td->td_upcall;
1717108338Sjulian /*
1718108338Sjulian * There is no more work to do and we are going to ride
1719111028Sjeff * this thread up to userland as an upcall.
1720108338Sjulian * Do the last parts of the setup needed for the upcall.
1721108338Sjulian */
1722108338Sjulian CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
1723108338Sjulian td, td->td_proc->p_pid, td->td_proc->p_comm);
1724104695Sjulian
1725108338Sjulian /*
1726108338Sjulian * Set user context to the UTS.
1727108338Sjulian * Will use Giant in cpu_thread_clean() because it uses
1728108338Sjulian * kmem_free(kernel_map, ...)
1729108338Sjulian */
1730111028Sjeff cpu_set_upcall_kse(td, ku);
1731111028Sjeff mtx_lock_spin(&sched_lock);
1732111028Sjeff td->td_flags &= ~TDF_UPCALLING;
1733111028Sjeff if (ku->ku_flags & KUF_DOUPCALL)
1734111028Sjeff ku->ku_flags &= ~KUF_DOUPCALL;
1735111028Sjeff mtx_unlock_spin(&sched_lock);
1736111028Sjeff
1737111028Sjeff /*
1738108338Sjulian * Unhook the list of completed threads.
1739108338Sjulian * Anything that completes after this gets to
1740108338Sjulian * come in next time.
1741108338Sjulian * Put the list of completed thread mailboxes on
1742108338Sjulian * this KSE's mailbox.
1743108338Sjulian */
1744111028Sjeff error = thread_link_mboxes(kg, ku);
1745108338Sjulian if (error)
1746111115Sdavidxu goto out;
174799026Sjulian
1748108338Sjulian /*
1749108338Sjulian * Set state and clear the thread mailbox pointer.
1750108338Sjulian * From now on we are just a bound outgoing process.
1751108338Sjulian * **Problem** userret is often called several times.
1752108338Sjulian * It would be nice if this all happened only on the first
1753108338Sjulian * time through (the scan for extra work etc.).
1754108338Sjulian */
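 /*
  * (Illustrative note, not part of the original source: km_curthread
  *  is the mailbox word the UTS uses to tell whether a kernel thread
  *  still owns the mailbox; thread_user_enter() above reads it with
  *  fuword() and the suword() below clears it, so that the mailbox
  *  reads as free once this upcall is delivered.)
  */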
1755111028Sjeff error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
1756108338Sjulian if (error)
1757111115Sdavidxu goto out;
1758111028Sjeff
1759111028Sjeff /* Export the current system time. */
1760107060Sdavidxu nanotime(&ts);
1761111115Sdavidxu error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday,
1762111115Sdavidxu sizeof(ts));
1763111115Sdavidxu }
1764111115Sdavidxu
1765111115Sdavidxuout:
1766111115Sdavidxu if (error) {
1767111115Sdavidxu /*
1768111129Sdavidxu * Things are going to be so screwed up that we should
1769111129Sdavidxu * just kill the process.
1770111115Sdavidxu * How do we do that?
1771111115Sdavidxu */
1772111115Sdavidxu PROC_LOCK(td->td_proc);
1773111115Sdavidxu psignal(td->td_proc, SIGSEGV);
1774111115Sdavidxu PROC_UNLOCK(td->td_proc);
1775111115Sdavidxu } else {
1776111115Sdavidxu /*
1777111115Sdavidxu * Optimisation:
1778111115Sdavidxu * Ensure that we have a spare thread available,
1779111115Sdavidxu * for when we re-enter the kernel.
1780111115Sdavidxu */
1781111115Sdavidxu if (td->td_standin == NULL)
1782111115Sdavidxu thread_alloc_spare(td, NULL);
1783111115Sdavidxu }
1784111115Sdavidxu
1785111028Sjeff /*
1786111028Sjeff * Clear the thread mailbox first, then clear the system
1787111028Sjeff * tick count.  The order is important because
1788111028Sjeff * thread_statclock() uses the mailbox pointer to tell a
1789111028Sjeff * userland thread from a UTS kernel thread.
1790111028Sjeff */
1791108338Sjulian td->td_mailbox = NULL;
1792111028Sjeff td->td_usticks = 0;
1793104695Sjulian return (error); /* go sync */
179499026Sjulian}
179599026Sjulian
179699026Sjulian/*
179799026Sjulian * Enforce single-threading.
179899026Sjulian *
179999026Sjulian * Returns 1 if the caller must abort (another thread is waiting to
180099026Sjulian * exit the process or similar). Process is locked!
180199026Sjulian * Returns 0 when you are successfully the only thread running.
180299026Sjulian * A process has successfully single threaded in the suspend mode when
180399026Sjulian * there are no threads in user mode.  Threads in the kernel must be
180499026Sjulian * allowed to continue until they get to the user boundary.  They may even
180599026Sjulian * copy out their return values and data before suspending.  They may
180699026Sjulian * however be accelerated in reaching the user boundary as we will wake
180799026Sjulian * up any sleeping threads that are interruptible (PCATCH).
180899026Sjulian */
180999026Sjulianint
181099026Sjulianthread_single(int force_exit)
181199026Sjulian{
181299026Sjulian struct thread *td;
181399026Sjulian struct thread *td2;
181499026Sjulian struct proc *p;
181599026Sjulian
181699026Sjulian td = curthread;
181799026Sjulian p = td->td_proc;
1818107719Sjulian mtx_assert(&Giant, MA_OWNED);
181999026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED);
182099026Sjulian KASSERT((td != NULL), ("curthread is NULL"));
182199026Sjulian
1822112910Sjeff if ((p->p_flag & P_THREADED) == 0 && p->p_numthreads == 1)
182399026Sjulian return (0);
182499026Sjulian
1825100648Sjulian /* Is someone already single threading? */
1826100648Sjulian if (p->p_singlethread)
182799026Sjulian return (1);
182899026Sjulian
1829108338Sjulian if (force_exit == SINGLE_EXIT) {
183099026Sjulian p->p_flag |= P_SINGLE_EXIT;
1831108338Sjulian } else
183299026Sjulian p->p_flag &= ~P_SINGLE_EXIT;
1833102950Sdavidxu p->p_flag |= P_STOPPED_SINGLE;
183499026Sjulian p->p_singlethread = td;
1835105911Sjulian /* XXXKSE Which lock protects the below values? */
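 /*
  * (Illustrative note, not part of the original source: the loop
  *  below runs until this thread is the only unsuspended thread,
  *  i.e. until
  *	p->p_numthreads - p->p_suspcount == 1;
  *  each pass pokes every other thread -- aborting interruptible
  *  sleeps in the SINGLE_EXIT case, suspending sleepers otherwise --
  *  and then suspends the caller until the stragglers check in.)
  */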
183699026Sjulian while ((p->p_numthreads - p->p_suspcount) != 1) {
1837103216Sjulian mtx_lock_spin(&sched_lock);
183899026Sjulian FOREACH_THREAD_IN_PROC(p, td2) {
183999026Sjulian if (td2 == td)
184099026Sjulian continue;
1841111115Sdavidxu td2->td_flags |= TDF_ASTPENDING;
1842103216Sjulian if (TD_IS_INHIBITED(td2)) {
1843105911Sjulian if (force_exit == SINGLE_EXIT) {
1844105911Sjulian if (TD_IS_SUSPENDED(td2)) {
1845103216Sjulian thread_unsuspend_one(td2);
1846105911Sjulian }
1847105911Sjulian if (TD_ON_SLEEPQ(td2) &&
1848105911Sjulian (td2->td_flags & TDF_SINTR)) {
1849105911Sjulian if (td2->td_flags & TDF_CVWAITQ)
1850105911Sjulian cv_abort(td2);
1851105911Sjulian else
1852105911Sjulian abortsleep(td2);
1853105911Sjulian }
1854105911Sjulian } else {
1855105911Sjulian if (TD_IS_SUSPENDED(td2))
1856105874Sdavidxu continue;
1857111028Sjeff /*
1858111028Sjeff * Maybe other inhibited states too?
1859111028Sjeff * XXXKSE Is it totally safe to
1860111028Sjeff * suspend a non-interruptible thread?
1861111028Sjeff */
1862108338Sjulian if (td2->td_inhibitors &
1863111028Sjeff (TDI_SLEEPING | TDI_SWAPPED))
1864105911Sjulian thread_suspend_one(td2);
186599026Sjulian }
186699026Sjulian }
186799026Sjulian }
1868105911Sjulian /*
1869105911Sjulian * Maybe we suspended some threads... was it enough?
1870105911Sjulian */
1871105911Sjulian if ((p->p_numthreads - p->p_suspcount) == 1) {
1872105911Sjulian mtx_unlock_spin(&sched_lock);
1873105911Sjulian break;
1874105911Sjulian }
1875105911Sjulian
187699026Sjulian /*
187799026Sjulian * Wake us up when everyone else has suspended.
1878100648Sjulian * In the meantime we suspend as well.
187999026Sjulian */
1880103216Sjulian thread_suspend_one(td);
1881112910Sjeff /* XXX If you recursed this is broken. */
188299026Sjulian mtx_unlock(&Giant);
188399026Sjulian PROC_UNLOCK(p);
1884107719Sjulian p->p_stats->p_ru.ru_nvcsw++;
188599026Sjulian mi_switch();
188699026Sjulian mtx_unlock_spin(&sched_lock);
188799026Sjulian mtx_lock(&Giant);
188899026Sjulian PROC_LOCK(p);
188999026Sjulian }
1890111028Sjeff if (force_exit == SINGLE_EXIT) {
1891111028Sjeff if (td->td_upcall) {
1892111028Sjeff mtx_lock_spin(&sched_lock);
1893111028Sjeff upcall_remove(td);
1894111028Sjeff mtx_unlock_spin(&sched_lock);
1895111028Sjeff }
1896105854Sjulian kse_purge(p, td);
1897111028Sjeff }
189899026Sjulian return (0);
189999026Sjulian}
190099026Sjulian
190199026Sjulian/*
190299026Sjulian * Called in from locations that can safely check to see
190399026Sjulian * whether we have to suspend or at least throttle for a
190499026Sjulian * single-thread event (e.g. fork).
190599026Sjulian *
190699026Sjulian * Such locations include userret().
190799026Sjulian * If the "return_instead" argument is non-zero, the thread must be able to
190899026Sjulian * accept 0 (caller may continue), or 1 (caller must abort) as a result.
190999026Sjulian *
191099026Sjulian * The 'return_instead' argument tells the function if it may do a
191199026Sjulian * thread_exit() or suspend, or whether the caller must abort and back
191299026Sjulian * out instead.
191399026Sjulian *
191499026Sjulian * If the thread that set the single_threading request has set the
191599026Sjulian * P_SINGLE_EXIT bit in the process flags then this call will never return
191699026Sjulian * if 'return_instead' is false, but will exit.
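 *
 * (Illustrative sketch, not part of the original source: a caller
 *  holding state it must first undo would pass 1 and back out on a
 *  non-zero return --
 *	PROC_LOCK(p);
 *	if (thread_suspend_check(1)) {
 *		PROC_UNLOCK(p);
 *		return (EINTR);		(error choice is the caller's)
 *	}
 *	PROC_UNLOCK(p);
 *  while the userret()/AST path simply passes 0 and lets this
 *  function suspend or exit the thread as required.)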
191799026Sjulian *
191899026Sjulian * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
191999026Sjulian *---------------+--------------------+---------------------
192099026Sjulian *       0       | returns 0          | returns 0 or 1
192199026Sjulian *               | when ST ends       | immediately
192299026Sjulian *---------------+--------------------+---------------------
192399026Sjulian *       1       | thread exits       | returns 1
192499026Sjulian *               |                    | immediately
192599026Sjulian * 0 = thread_exit() or suspension ok,
192699026Sjulian * other = return error instead of stopping the thread.
192799026Sjulian *
192899026Sjulian * While a full suspension is in effect, even a single-threading
192999026Sjulian * thread would be suspended if it made this call (but it shouldn't).
193099026Sjulian * This call should only be made from places where
193199026Sjulian * thread_exit() would be safe as that may be the outcome unless
193299026Sjulian * return_instead is set.
193399026Sjulian */
193499026Sjulianint
193599026Sjulianthread_suspend_check(int return_instead)
193699026Sjulian{
1937104502Sjmallett struct thread *td;
1938104502Sjmallett struct proc *p;
1939105854Sjulian struct ksegrp *kg;
194099026Sjulian
194199026Sjulian td = curthread;
194299026Sjulian p = td->td_proc;
1943105854Sjulian kg = td->td_ksegrp;
194499026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED);
194599026Sjulian while (P_SHOULDSTOP(p)) {
1946102950Sdavidxu if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
194799026Sjulian KASSERT(p->p_singlethread != NULL,
194899026Sjulian ("singlethread not set"));
194999026Sjulian /*
1950100648Sjulian * The only suspension in action is a
1951100648Sjulian * single-threading.  The single threader need not stop.
1952100646Sjulian * XXX Should be safe to access unlocked
1953100646Sjulian * as it can only be set to be true by us.
195499026Sjulian */
1955100648Sjulian if (p->p_singlethread == td)
195699026Sjulian return (0); /* Exempt from stopping. */
195799026Sjulian }
1958100648Sjulian if (return_instead)
195999026Sjulian return (1);
196099026Sjulian
1961112071Sdavidxu mtx_lock_spin(&sched_lock);
1962112071Sdavidxu thread_stopped(p);
196399026Sjulian /*
196499026Sjulian * If the process is waiting for us to exit,
196599026Sjulian * this thread should just kill itself.
1966102950Sdavidxu * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
196799026Sjulian */
196899026Sjulian if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
196999026Sjulian while (mtx_owned(&Giant))
197099026Sjulian mtx_unlock(&Giant);
1971112910Sjeff if (p->p_flag & P_THREADED)
1972112910Sjeff thread_exit();
1973112910Sjeff else
1974112910Sjeff thr_exit1();
197599026Sjulian }
197699026Sjulian
1977112910Sjeff mtx_assert(&Giant, MA_NOTOWNED);
197899026Sjulian /*
197999026Sjulian * When a thread suspends, it just
198099026Sjulian * moves to the process's suspend queue
198199026Sjulian * and stays there.
198299026Sjulian */
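 /*
  * (Illustrative note, not part of the original source:
  *  thread_suspend_one() below requires sched_lock, which is held
  *  here; it parks us on p_suspended until a matching
  *  thread_unsuspend_one() calls setrunnable(), at which point
  *  mi_switch() returns and the while loop re-tests P_SHOULDSTOP(p).)
  */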
1983103216Sjulian thread_suspend_one(td);
198499026Sjulian PROC_UNLOCK(p);
1985102950Sdavidxu if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1986100632Sjulian if (p->p_numthreads == p->p_suspcount) {
1987103216Sjulian thread_unsuspend_one(p->p_singlethread);
1988100632Sjulian }
1989100632Sjulian }
1990100594Sjulian p->p_stats->p_ru.ru_nivcsw++;
199199026Sjulian mi_switch();
199299026Sjulian mtx_unlock_spin(&sched_lock);
199399026Sjulian PROC_LOCK(p);
199499026Sjulian }
199599026Sjulian return (0);
199699026Sjulian}
199799026Sjulian
1998102898Sdavidxuvoid
1999102898Sdavidxuthread_suspend_one(struct thread *td)
2000102898Sdavidxu{
2001102898Sdavidxu struct proc *p = td->td_proc;
2002102898Sdavidxu
2003102898Sdavidxu mtx_assert(&sched_lock, MA_OWNED);
2004112071Sdavidxu KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
2005102898Sdavidxu p->p_suspcount++;
2006103216Sjulian TD_SET_SUSPENDED(td);
2007102898Sdavidxu TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
2008103216Sjulian /*
2009103216Sjulian * Hack: If we are suspending but are on the sleep queue
2010103216Sjulian * then we are in msleep or the cv equivalent. We
2011103216Sjulian * want to look like we have two inhibitors.
2012105911Sjulian * It may already be set... it doesn't matter.
2013103216Sjulian */
2014103216Sjulian if (TD_ON_SLEEPQ(td))
2015103216Sjulian TD_SET_SLEEPING(td);
2016102898Sdavidxu}
2017102898Sdavidxu
2018102898Sdavidxuvoid
2019102898Sdavidxuthread_unsuspend_one(struct thread *td)
2020102898Sdavidxu{
2021102898Sdavidxu struct proc *p = td->td_proc;
2022102898Sdavidxu
2023102898Sdavidxu mtx_assert(&sched_lock, MA_OWNED);
2024102898Sdavidxu TAILQ_REMOVE(&p->p_suspended, td, td_runq);
2025103216Sjulian TD_CLR_SUSPENDED(td);
2026102898Sdavidxu p->p_suspcount--;
2027103216Sjulian setrunnable(td);
2028102898Sdavidxu}
2029102898Sdavidxu
203099026Sjulian/*
203199026Sjulian * Allow all threads blocked by single threading to continue running.
203299026Sjulian */
203399026Sjulianvoid
203499026Sjulianthread_unsuspend(struct proc *p)
203599026Sjulian{
203699026Sjulian struct thread *td;
203799026Sjulian
2038100646Sjulian mtx_assert(&sched_lock, MA_OWNED);
203999026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED);
204099026Sjulian if (!P_SHOULDSTOP(p)) {
204199026Sjulian while ((td = TAILQ_FIRST(&p->p_suspended))) {
2042102898Sdavidxu thread_unsuspend_one(td);
204399026Sjulian }
2044102950Sdavidxu } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
204599026Sjulian (p->p_numthreads == p->p_suspcount)) {
204699026Sjulian /*
204799026Sjulian * Stopping everything also did the job for the single
204899026Sjulian * threading request. Now we've downgraded to single-threaded,
204999026Sjulian * let it continue.
205099026Sjulian */
2051102898Sdavidxu thread_unsuspend_one(p->p_singlethread);
205299026Sjulian }
205399026Sjulian}
205499026Sjulian
205599026Sjulianvoid
205699026Sjulianthread_single_end(void)
205799026Sjulian{
205899026Sjulian struct thread *td;
205999026Sjulian struct proc *p;
206099026Sjulian
206199026Sjulian td = curthread;
206299026Sjulian p = td->td_proc;
206399026Sjulian PROC_LOCK_ASSERT(p, MA_OWNED);
2064102950Sdavidxu p->p_flag &= ~P_STOPPED_SINGLE;
206599026Sjulian p->p_singlethread = NULL;
2066102292Sjulian /*
2067102292Sjulian * If there are other threads they may now run,
2068102292Sjulian * unless of course there is a blanket 'stop order'
2069102292Sjulian * on the process. The single threader must be allowed
2070102292Sjulian * to continue, however, as this is a bad place to stop.
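 *
 * (Illustrative pairing, not part of the original source: a caller
 *  such as fork1() brackets its critical region roughly as
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_NO_EXIT) == 0) {
 *		... act on a quiescent process ...
 *		thread_single_end();
 *	}
 *	PROC_UNLOCK(p);
 *  where SINGLE_NO_EXIT names the non-exiting mode; this file itself
 *  only compares force_exit against SINGLE_EXIT.)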
2071102292Sjulian */ 2072102292Sjulian if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) { 2073102292Sjulian mtx_lock_spin(&sched_lock); 2074102292Sjulian while (( td = TAILQ_FIRST(&p->p_suspended))) { 2075103216Sjulian thread_unsuspend_one(td); 2076102292Sjulian } 2077102292Sjulian mtx_unlock_spin(&sched_lock); 2078102292Sjulian } 207999026Sjulian} 208099026Sjulian 2081102292Sjulian 2082
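#if 0
/*
 * Illustrative sketch, not part of the original source: the teardown
 * sequence thread_single(SINGLE_EXIT) performs above, pulled out for
 * clarity.  The surviving thread first gives up its own upcall
 * structure under sched_lock, then lets kse_purge() reap the
 * now-unowned KSEs and ksegrps; kse_purge() KASSERTs that it is the
 * last thread standing.
 */
static void
example_single_exit_teardown(struct proc *p, struct thread *td)
{
	if (td->td_upcall) {
		mtx_lock_spin(&sched_lock);
		upcall_remove(td);
		mtx_unlock_spin(&sched_lock);
	}
	kse_purge(p, td);
}
#endif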