kern_thread.c revision 176730
/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 176730 2008-03-02 07:39:22Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <sys/eventhandler.h>

/*
 * thread related storage.
 * All struct thread allocations come out of this UMA zone; the zone's
 * ctor/dtor/init/fini hooks below manage per-incarnation vs. type-stable
 * pieces of the thread.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

/* Tunable cap on how many threads one process may create. */
int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

/* Read-only counter of how often the per-process limit was reached. */
int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

#ifdef KSE
int virtual_cpu;

#endif
/*
 * Threads that have exited but not yet been freed.  Protected by
 * zombie_lock (a spin mutex); drained by thread_reap().
 */
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);

#ifdef KSE
/*
 * Sysctl handler for kern.threads.virtual_cpu.  Reads report mp_ncpus
 * when virtual_cpu is unset (0); writes reject negative values.
 */
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

	def_val = mp_ncpus;
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	/* Read-only request, or the handler itself failed: done. */
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");
#endif

/* tid_lock protects the unr(9) allocator that hands out thread IDs. */
struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 * UMA zone constructor: runs every time a thread is handed out by
 * uma_zalloc(), whether the backing item is fresh or recycled, so any
 * per-incarnation state (TID, state, syscall count) is reset here.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	/* Each incarnation gets a fresh thread ID from the unr space. */
	td->td_tid = alloc_unr(tid_unrhdr);
	td->td_syscalls = 0;

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 * UMA zone destructor: runs when a thread is returned to the zone via
 * uma_zfree().  Releases the thread ID; the type-stable pieces
 * (sleepqueue, turnstile) are kept until thread_fini().
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
	sched_newthread(td);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	/*
	 * These allocations are type-stable: they survive repeated
	 * ctor/dtor cycles and are only released in thread_fini().
	 */
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	/*
	 * The scheduler-private per-thread state is placed immediately
	 * after the struct thread (the zone was sized for both,
	 * see sched_sizeof_thread() in threadinit()).
	 */
	td->td_sched = (struct td_sched *)&td[1];
	sched_newthread(td);
	umtx_thread_init(td);
	td->td_kstack = 0;
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	seltdfini(td);
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 * proc_init()
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

#ifdef KSE
	TAILQ_INIT(&p->p_upcalls);	/* upcall list */
#endif
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* leave one number for thread0 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
#ifdef KSE
	kseinit();	/* set up kse specific stuff  e.g. upcall zone*/
#endif
}

/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
void
thread_zombie(struct thread *td)
{
	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}

/*
 * Release a thread that has exited after cpu_throw().
 * Drops the proc's p_exitthreads reference (taken in thread_exit())
 * before queueing the thread for reaping.
 */
void
thread_stash(struct thread *td)
{
	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}

/*
 * Reap zombie kse resource.
 * Frees every thread currently on the zombie list; credentials are
 * released here as well (see the note in thread_exit()).
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		/* Detach the whole list at once; free outside the lock. */
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
#ifdef KSE
	upcall_reap();
#endif
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	struct thread *td;

	thread_reap(); /* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	/* vm_thread_new() provides the kernel stack; NULL on failure. */
	if (!vm_thread_new(td, 0)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}


/*
 * Deallocate a thread.
 * Releases the cpuset reference, MD state, and any kernel stacks
 * before returning the thread to the zone.
 */
void
thread_free(struct thread *td)
{
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw(). This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 */
/*  XXX
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode I guess. Because we do this (cpu_throw) unconditionally
 * here, they have their own version of it. (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1
 * anyhow..  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * called from:
 * exit1()
 * kse_exit()
 * thr_exit()
 * ifdef KSE
 * thread_user_enter()
 * thread_userret()
 * endif
 * thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;

	/* Caller must hold both the proc lock and the proc spin lock. */
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif

#ifdef KSE
	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#endif

	umtx_thread_exit(td);

	/*
	 * drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process
	 * So that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_lock(td);
#ifdef KSE
			kse_unlink(td);
#else
			thread_unlink(td);
#endif
			thread_unlock(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SNGL is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					/*
					 * We were the last obstacle; wake the
					 * single-threading thread.
					 */
					thread_lock(p->p_singlethread);
					thread_unsuspend_one(p->p_singlethread);
					thread_unlock(p->p_singlethread);
				}
			}

			/* Dropped again by thread_stash() after cpu_throw(). */
			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit()
			 * what should we do?
			 * Theoretically this can't happen
 			 * exit1() - clears threading flags before coming here
 			 * kse_exit() - treats last thread specially
 			 * thr_exit() - treats last thread specially
			 * ifdef KSE
 			 * thread_user_enter() - only if more exist
 			 * thread_userret() - only if more exist
			 * endif
 			 * thread_suspend_check() - only if more exist
			 */
			panic ("thread_exit: Last thread exiting on its own");
		}
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Save our tick information with both the thread and proc locked */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
#ifdef KSE
	if (td->td_standin != NULL) {
		if (td->td_standin->td_ucred != NULL) {
			crfree(td->td_standin->td_ucred);
			td->td_standin->td_ucred = NULL;
		}
		thread_free(td->td_standin);
		td->td_standin = NULL;
	}
#endif
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *  proc_linkup()
 *  thread_schedule_upcall()
 *  thr_create()
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * it's spinlock has been created.
	 * PROC_SLOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *  thread_single(exit)  (called from execve and exit)
 *  kse_exit() XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
	thread_lock(td);
	upcall_remove(td);
	thread_unlock(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#else
	p->p_flag &= ~P_HADTHREADS;
#endif
}

/*
 * Remove a thread from its process's thread list.
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must  NOT clear links to proc! */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * There are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptable. (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	/* Never threaded: nothing to do. */
	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	/*
	 * Record the requested mode in the proc flags.  SINGLE_EXIT and
	 * SINGLE_BOUNDARY are mutually exclusive.
	 */
	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	/*
	 * "remaining" counts the threads still standing in our way:
	 * for EXIT every thread must go; for BOUNDARY those already at
	 * the user boundary don't count; otherwise suspended ones don't.
	 */
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					/* Kick interruptible sleepers out. */
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				/* Running on another CPU: force it to notice. */
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost  to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	      ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		PROC_SLOCK(p);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				/*
				 * We are the last thread the single-threader
				 * was waiting for; wake it before we suspend.
				 */
				thread_lock(p->p_singlethread);
				thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}

/*
 * Suspend the current thread on behalf of thread_single().
 * Caller holds both the proc lock and the proc spin lock; both are
 * held again on return.
 */
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	thread_stopped(p);
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	sched_sleep(td);
	TD_SET_SUSPENDED(td);
	PROC_SUNLOCK(p);
	DROP_GIANT();
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
	PROC_LOCK(p);
	PROC_SLOCK(p);
}

/*
 * Mark a thread suspended.  Caller holds the proc spin lock and the
 * thread lock; does not context switch.
 */
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	sched_sleep(td);
	TD_SET_SUSPENDED(td);
}

/*
 * Clear a thread's suspended state and make it runnable again.
 * Caller holds the proc spin lock and the thread lock.
 */
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
}

/*
 * End the single threading mode..
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
}

/*
 * Look up a thread in process p by its thread ID.
 * Caller holds the proc lock.  Returns the matching thread, or NULL
 * when the loop runs off the end of the list without a match.
 */
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	PROC_SUNLOCK(p);
	return (td);
}