kern_thread.c revision 116963
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_thread.c 116963 2003-06-28 08:29:05Z davidxu $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
64 */ 65static uma_zone_t ksegrp_zone; 66static uma_zone_t kse_zone; 67static uma_zone_t thread_zone; 68static uma_zone_t upcall_zone; 69 70/* DEBUG ONLY */ 71SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation"); 72static int thread_debug = 0; 73SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW, 74 &thread_debug, 0, "thread debug"); 75 76static int max_threads_per_proc = 150; 77SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW, 78 &max_threads_per_proc, 0, "Limit on threads per proc"); 79 80static int max_groups_per_proc = 50; 81SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW, 82 &max_groups_per_proc, 0, "Limit on thread groups per proc"); 83 84static int max_threads_hits; 85SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD, 86 &max_threads_hits, 0, ""); 87 88static int virtual_cpu; 89 90#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start)) 91 92TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); 93TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses); 94TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps); 95TAILQ_HEAD(, kse_upcall) zombie_upcalls = 96 TAILQ_HEAD_INITIALIZER(zombie_upcalls); 97struct mtx kse_zombie_lock; 98MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN); 99 100static void kse_purge(struct proc *p, struct thread *td); 101static void kse_purge_group(struct thread *td); 102static int thread_update_usr_ticks(struct thread *td, int user); 103static void thread_alloc_spare(struct thread *td, struct thread *spare); 104 105static int 106sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS) 107{ 108 int error, new_val; 109 int def_val; 110 111#ifdef SMP 112 def_val = mp_ncpus; 113#else 114 def_val = 1; 115#endif 116 if (virtual_cpu == 0) 117 new_val = def_val; 118 else 119 new_val = virtual_cpu; 120 error = sysctl_handle_int(oidp, &new_val, 0, req); 121 if (error != 0 || req->newptr == NULL) 122 return (error); 123 if (new_val < 0) 124 return (EINVAL); 125 virtual_cpu = new_val; 126 return (0); 127} 128 129/* DEBUG ONLY */ 130SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW, 131 0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I", 132 "debug virtual cpus"); 133 134/* 135 * Prepare a thread for use. 136 */ 137static void 138thread_ctor(void *mem, int size, void *arg) 139{ 140 struct thread *td; 141 142 td = (struct thread *)mem; 143 td->td_state = TDS_INACTIVE; 144 td->td_oncpu = NOCPU; 145} 146 147/* 148 * Reclaim a thread after use. 149 */ 150static void 151thread_dtor(void *mem, int size, void *arg) 152{ 153 struct thread *td; 154 155 td = (struct thread *)mem; 156 157#ifdef INVARIANTS 158 /* Verify that this thread is in a safe state to free. */ 159 switch (td->td_state) { 160 case TDS_INHIBITED: 161 case TDS_RUNNING: 162 case TDS_CAN_RUN: 163 case TDS_RUNQ: 164 /* 165 * We must never unlink a thread that is in one of 166 * these states, because it is currently active. 167 */ 168 panic("bad state for thread unlinking"); 169 /* NOTREACHED */ 170 case TDS_INACTIVE: 171 break; 172 default: 173 panic("bad thread state"); 174 /* NOTREACHED */ 175 } 176#endif 177} 178 179/* 180 * Initialize type-stable parts of a thread (when newly created). 
181 */ 182static void 183thread_init(void *mem, int size) 184{ 185 struct thread *td; 186 187 td = (struct thread *)mem; 188 mtx_lock(&Giant); 189 vm_thread_new(td, 0); 190 mtx_unlock(&Giant); 191 cpu_thread_setup(td); 192 td->td_sched = (struct td_sched *)&td[1]; 193} 194 195/* 196 * Tear down type-stable parts of a thread (just before being discarded). 197 */ 198static void 199thread_fini(void *mem, int size) 200{ 201 struct thread *td; 202 203 td = (struct thread *)mem; 204 vm_thread_dispose(td); 205} 206 207/* 208 * Initialize type-stable parts of a kse (when newly created). 209 */ 210static void 211kse_init(void *mem, int size) 212{ 213 struct kse *ke; 214 215 ke = (struct kse *)mem; 216 ke->ke_sched = (struct ke_sched *)&ke[1]; 217} 218 219/* 220 * Initialize type-stable parts of a ksegrp (when newly created). 221 */ 222static void 223ksegrp_init(void *mem, int size) 224{ 225 struct ksegrp *kg; 226 227 kg = (struct ksegrp *)mem; 228 kg->kg_sched = (struct kg_sched *)&kg[1]; 229} 230 231/* 232 * KSE is linked into kse group. 233 */ 234void 235kse_link(struct kse *ke, struct ksegrp *kg) 236{ 237 struct proc *p = kg->kg_proc; 238 239 TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist); 240 kg->kg_kses++; 241 ke->ke_state = KES_UNQUEUED; 242 ke->ke_proc = p; 243 ke->ke_ksegrp = kg; 244 ke->ke_thread = NULL; 245 ke->ke_oncpu = NOCPU; 246 ke->ke_flags = 0; 247} 248 249void 250kse_unlink(struct kse *ke) 251{ 252 struct ksegrp *kg; 253 254 mtx_assert(&sched_lock, MA_OWNED); 255 kg = ke->ke_ksegrp; 256 TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); 257 if (ke->ke_state == KES_IDLE) { 258 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); 259 kg->kg_idle_kses--; 260 } 261 if (--kg->kg_kses == 0) 262 ksegrp_unlink(kg); 263 /* 264 * Aggregate stats from the KSE 265 */ 266 kse_stash(ke); 267} 268 269void 270ksegrp_link(struct ksegrp *kg, struct proc *p) 271{ 272 273 TAILQ_INIT(&kg->kg_threads); 274 TAILQ_INIT(&kg->kg_runq); /* links with td_runq */ 275 TAILQ_INIT(&kg->kg_slpq); /* links with td_runq */ 276 TAILQ_INIT(&kg->kg_kseq); /* all kses in ksegrp */ 277 TAILQ_INIT(&kg->kg_iq); /* all idle kses in ksegrp */ 278 TAILQ_INIT(&kg->kg_upcalls); /* all upcall structure in ksegrp */ 279 kg->kg_proc = p; 280 /* 281 * the following counters are in the -zero- section 282 * and may not need clearing 283 */ 284 kg->kg_numthreads = 0; 285 kg->kg_runnable = 0; 286 kg->kg_kses = 0; 287 kg->kg_runq_kses = 0; /* XXXKSE change name */ 288 kg->kg_idle_kses = 0; 289 kg->kg_numupcalls = 0; 290 /* link it in now that it's consistent */ 291 p->p_numksegrps++; 292 TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp); 293} 294 295void 296ksegrp_unlink(struct ksegrp *kg) 297{ 298 struct proc *p; 299 300 mtx_assert(&sched_lock, MA_OWNED); 301 KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads")); 302 KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses")); 303 KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls")); 304 305 p = kg->kg_proc; 306 TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); 307 p->p_numksegrps--; 308 /* 309 * Aggregate stats from the KSE 310 */ 311 ksegrp_stash(kg); 312} 313 314struct kse_upcall * 315upcall_alloc(void) 316{ 317 struct kse_upcall *ku; 318 319 ku = uma_zalloc(upcall_zone, M_WAITOK); 320 bzero(ku, sizeof(*ku)); 321 return (ku); 322} 323 324void 325upcall_free(struct kse_upcall *ku) 326{ 327 328 uma_zfree(upcall_zone, ku); 329} 330 331void 332upcall_link(struct kse_upcall *ku, struct ksegrp *kg) 333{ 334 335 mtx_assert(&sched_lock, MA_OWNED); 336 
TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link); 337 ku->ku_ksegrp = kg; 338 kg->kg_numupcalls++; 339} 340 341void 342upcall_unlink(struct kse_upcall *ku) 343{ 344 struct ksegrp *kg = ku->ku_ksegrp; 345 346 mtx_assert(&sched_lock, MA_OWNED); 347 KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__)); 348 TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link); 349 kg->kg_numupcalls--; 350 upcall_stash(ku); 351} 352 353void 354upcall_remove(struct thread *td) 355{ 356 357 if (td->td_upcall) { 358 td->td_upcall->ku_owner = NULL; 359 upcall_unlink(td->td_upcall); 360 td->td_upcall = 0; 361 } 362} 363 364/* 365 * For a newly created process, 366 * link up all the structures and its initial threads etc. 367 */ 368void 369proc_linkup(struct proc *p, struct ksegrp *kg, 370 struct kse *ke, struct thread *td) 371{ 372 373 TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */ 374 TAILQ_INIT(&p->p_threads); /* all threads in proc */ 375 TAILQ_INIT(&p->p_suspended); /* Threads suspended */ 376 p->p_numksegrps = 0; 377 p->p_numthreads = 0; 378 379 ksegrp_link(kg, p); 380 kse_link(ke, kg); 381 thread_link(td, kg); 382} 383 384/* 385struct kse_thr_interrupt_args { 386 struct kse_thr_mailbox * tmbx; 387 int sig; 388}; 389*/ 390int 391kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap) 392{ 393 struct proc *p; 394 struct thread *td2; 395 int sig = uap->sig; 396 397 p = td->td_proc; 398 if (!(p->p_flag & P_SA) || (uap->tmbx == NULL) || 399 (sig < -2) || (sig > _SIG_MAXSIG)) 400 return (EINVAL); 401 402 PROC_LOCK(p); 403 mtx_lock_spin(&sched_lock); 404 FOREACH_THREAD_IN_PROC(p, td2) { 405 if (td2->td_mailbox == uap->tmbx) 406 break; 407 } 408 if (td2 == NULL) { 409 mtx_unlock_spin(&sched_lock); 410 PROC_UNLOCK(p); 411 return (ESRCH); 412 } 413 if (sig > 0) { 414 td2->td_flags &= ~TDF_INTERRUPT; 415 mtx_unlock_spin(&sched_lock); 416 tdsignal(td2, sig); 417 } else if (sig == 0) { 418 mtx_unlock_spin(&sched_lock); 419 } else { 420 td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING; 421 if (TD_CAN_UNBIND(td2)) 422 td->td_upcall->ku_flags |= KUF_DOUPCALL; 423 if (sig == -1) 424 td2->td_intrval = EINTR; 425 else if (sig == -2) 426 td2->td_intrval = ERESTART; 427 if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) { 428 if (td2->td_flags & TDF_CVWAITQ) 429 cv_abort(td2); 430 else 431 abortsleep(td2); 432 } 433 mtx_unlock_spin(&sched_lock); 434 } 435 PROC_UNLOCK(p); 436 return (0); 437} 438 439/* 440struct kse_exit_args { 441 register_t dummy; 442}; 443*/ 444int 445kse_exit(struct thread *td, struct kse_exit_args *uap) 446{ 447 struct proc *p; 448 struct ksegrp *kg; 449 struct kse *ke; 450 struct kse_upcall *ku, *ku2; 451 int error, count; 452 453 p = td->td_proc; 454 if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) 455 return (EINVAL); 456 kg = td->td_ksegrp; 457 count = 0; 458 PROC_LOCK(p); 459 mtx_lock_spin(&sched_lock); 460 FOREACH_UPCALL_IN_GROUP(kg, ku2) { 461 if (ku2->ku_flags & KUF_EXITING) 462 count++; 463 } 464 if ((kg->kg_numupcalls - count) == 1 && 465 (kg->kg_numthreads > 1)) { 466 mtx_unlock_spin(&sched_lock); 467 PROC_UNLOCK(p); 468 return (EDEADLK); 469 } 470 ku->ku_flags |= KUF_EXITING; 471 mtx_unlock_spin(&sched_lock); 472 PROC_UNLOCK(p); 473 error = suword(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE); 474 PROC_LOCK(p); 475 if (error) 476 psignal(p, SIGSEGV); 477 mtx_lock_spin(&sched_lock); 478 upcall_remove(td); 479 ke = td->td_kse; 480 if (p->p_numthreads == 1) { 481 kse_purge(p, td); 482 p->p_flag &= ~P_SA; 483 mtx_unlock_spin(&sched_lock); 484 PROC_UNLOCK(p); 485 } else { 
486 if (kg->kg_numthreads == 1) { /* Shutdown a group */ 487 kse_purge_group(td); 488 ke->ke_flags |= KEF_EXIT; 489 } 490 thread_stopped(p); 491 thread_exit(); 492 /* NOTREACHED */ 493 } 494 return (0); 495} 496 497/* 498 * Either becomes an upcall or waits for an awakening event and 499 * then becomes an upcall. Only error cases return. 500 */ 501/* 502struct kse_release_args { 503 struct timespec *timeout; 504}; 505*/ 506int 507kse_release(struct thread *td, struct kse_release_args *uap) 508{ 509 struct proc *p; 510 struct ksegrp *kg; 511 struct kse_upcall *ku; 512 struct timespec timeout; 513 struct timeval tv; 514 sigset_t sigset; 515 int error; 516 517 p = td->td_proc; 518 kg = td->td_ksegrp; 519 if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) 520 return (EINVAL); 521 if (uap->timeout != NULL) { 522 if ((error = copyin(uap->timeout, &timeout, sizeof(timeout)))) 523 return (error); 524 TIMESPEC_TO_TIMEVAL(&tv, &timeout); 525 } 526 if (td->td_flags & TDF_SA) 527 td->td_pflags |= TDP_UPCALLING; 528 else { 529 ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags); 530 if (ku->ku_mflags == -1) { 531 PROC_LOCK(p); 532 sigexit(td, SIGSEGV); 533 } 534 } 535 PROC_LOCK(p); 536 if (ku->ku_mflags & KMF_WAITSIGEVENT) { 537 /* UTS wants to wait for signal event */ 538 if (!(p->p_flag & P_SIGEVENT) && !(ku->ku_flags & KUF_DOUPCALL)) 539 error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH, 540 "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0)); 541 p->p_flag &= ~P_SIGEVENT; 542 sigset = p->p_siglist; 543 PROC_UNLOCK(p); 544 error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught, 545 sizeof(sigset)); 546 } else { 547 if (! kg->kg_completed && !(ku->ku_flags & KUF_DOUPCALL)) { 548 kg->kg_upsleeps++; 549 error = msleep(&kg->kg_completed, &p->p_mtx, 550 PPAUSE|PCATCH, "kserel", 551 (uap->timeout ? tvtohz(&tv) : 0)); 552 kg->kg_upsleeps--; 553 } 554 PROC_UNLOCK(p); 555 } 556 if (ku->ku_flags & KUF_DOUPCALL) { 557 mtx_lock_spin(&sched_lock); 558 ku->ku_flags &= ~KUF_DOUPCALL; 559 mtx_unlock_spin(&sched_lock); 560 } 561 return (0); 562} 563 564/* struct kse_wakeup_args { 565 struct kse_mailbox *mbx; 566}; */ 567int 568kse_wakeup(struct thread *td, struct kse_wakeup_args *uap) 569{ 570 struct proc *p; 571 struct ksegrp *kg; 572 struct kse_upcall *ku; 573 struct thread *td2; 574 575 p = td->td_proc; 576 td2 = NULL; 577 ku = NULL; 578 /* KSE-enabled processes only, please. */ 579 if (!(p->p_flag & P_SA)) 580 return (EINVAL); 581 PROC_LOCK(p); 582 mtx_lock_spin(&sched_lock); 583 if (uap->mbx) { 584 FOREACH_KSEGRP_IN_PROC(p, kg) { 585 FOREACH_UPCALL_IN_GROUP(kg, ku) { 586 if (ku->ku_mailbox == uap->mbx) 587 break; 588 } 589 if (ku) 590 break; 591 } 592 } else { 593 kg = td->td_ksegrp; 594 if (kg->kg_upsleeps) { 595 wakeup_one(&kg->kg_completed); 596 mtx_unlock_spin(&sched_lock); 597 PROC_UNLOCK(p); 598 return (0); 599 } 600 ku = TAILQ_FIRST(&kg->kg_upcalls); 601 } 602 if (ku) { 603 if ((td2 = ku->ku_owner) == NULL) { 604 panic("%s: no owner", __func__); 605 } else if (TD_ON_SLEEPQ(td2) && 606 ((td2->td_wchan == &kg->kg_completed) || 607 (td2->td_wchan == &p->p_siglist && 608 (ku->ku_mflags & KMF_WAITSIGEVENT)))) { 609 abortsleep(td2); 610 } else { 611 ku->ku_flags |= KUF_DOUPCALL; 612 } 613 mtx_unlock_spin(&sched_lock); 614 PROC_UNLOCK(p); 615 return (0); 616 } 617 mtx_unlock_spin(&sched_lock); 618 PROC_UNLOCK(p); 619 return (ESRCH); 620} 621 622/* 623 * No new KSEG: first call: use current KSE, don't schedule an upcall 624 * All other situations, do allocate max new KSEs and schedule an upcall. 
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't always have a cpu counter available. */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	if (!(mbx.km_flags & KMF_BOUND))
		sa = TDF_SA;
	else
		ncpus = 1;
	PROC_LOCK(p);
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA;
	}
	PROC_UNLOCK(p);
	if (!sa && !uap->newgroup && !first)
		return (EINVAL);
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* Racy, but cheap; re-checked under sched_lock below. */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		      kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		      RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		if (p->p_numksegrps >= max_groups_per_proc) {
			mtx_unlock_spin(&sched_lock);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_unlock_spin(&sched_lock);
	} else {
		if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
			return (EINVAL);
		newkg = kg;
	}

	/*
	 * Creating more upcalls than there are physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize the KSE group.
		 *
		 * For a multiplexed group, create as many KSEs as there are
		 * physical cpus.  This increases concurrency in the kernel
		 * even if userland is not MP safe and can only run on a
		 * single CPU.  Ideally, every physical cpu should execute a
		 * thread.  If there are enough KSEs, threads in the kernel
		 * can run in parallel on different cpus at full speed;
		 * kernel concurrency should not be restricted by the number
		 * of upcalls userland provides.  Adding more upcall
		 * structures only increases concurrency in userland.
		 *
		 * For a bound thread group there is only one thread in the
		 * group, so we create only one KSE for it.  A thread in this
		 * kind of group never schedules an upcall when it blocks;
		 * this is intended to simulate a pthread system scope thread.
720 */ 721 while (newkg->kg_kses < ncpus) { 722 newke = kse_alloc(); 723 bzero(&newke->ke_startzero, RANGEOF(struct kse, 724 ke_startzero, ke_endzero)); 725#if 0 726 mtx_lock_spin(&sched_lock); 727 bcopy(&ke->ke_startcopy, &newke->ke_startcopy, 728 RANGEOF(struct kse, ke_startcopy, ke_endcopy)); 729 mtx_unlock_spin(&sched_lock); 730#endif 731 mtx_lock_spin(&sched_lock); 732 kse_link(newke, newkg); 733 /* Add engine */ 734 kse_reassign(newke); 735 mtx_unlock_spin(&sched_lock); 736 } 737 } 738 newku = upcall_alloc(); 739 newku->ku_mailbox = uap->mbx; 740 newku->ku_func = mbx.km_func; 741 bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t)); 742 743 /* For the first call this may not have been set */ 744 if (td->td_standin == NULL) 745 thread_alloc_spare(td, NULL); 746 747 PROC_LOCK(p); 748 if (newkg->kg_numupcalls >= ncpus) { 749 PROC_UNLOCK(p); 750 upcall_free(newku); 751 return (EPROCLIM); 752 } 753 if (first) { 754 SIGSETOR(p->p_siglist, td->td_siglist); 755 SIGEMPTYSET(td->td_siglist); 756 SIGFILLSET(td->td_sigmask); 757 SIG_CANTMASK(td->td_sigmask); 758 } 759 mtx_lock_spin(&sched_lock); 760 PROC_UNLOCK(p); 761 upcall_link(newku, newkg); 762 if (mbx.km_quantum) 763 newkg->kg_upquantum = max(1, mbx.km_quantum/tick); 764 765 /* 766 * Each upcall structure has an owner thread, find which 767 * one owns it. 768 */ 769 if (uap->newgroup) { 770 /* 771 * Because new ksegrp hasn't thread, 772 * create an initial upcall thread to own it. 773 */ 774 newtd = thread_schedule_upcall(td, newku); 775 } else { 776 /* 777 * If current thread hasn't an upcall structure, 778 * just assign the upcall to it. 779 */ 780 if (td->td_upcall == NULL) { 781 newku->ku_owner = td; 782 td->td_upcall = newku; 783 newtd = td; 784 } else { 785 /* 786 * Create a new upcall thread to own it. 787 */ 788 newtd = thread_schedule_upcall(td, newku); 789 } 790 } 791 if (!sa) { 792 newtd->td_mailbox = mbx.km_curthread; 793 newtd->td_flags &= ~TDF_SA; 794 if (newtd != td) { 795 mtx_unlock_spin(&sched_lock); 796 cpu_set_upcall_kse(newtd, newku); 797 mtx_lock_spin(&sched_lock); 798 } 799 } else { 800 newtd->td_flags |= TDF_SA; 801 } 802 if (newtd != td) 803 setrunqueue(newtd); 804 mtx_unlock_spin(&sched_lock); 805 return (0); 806} 807 808/* 809 * Initialize global thread allocation resources. 810 */ 811void 812threadinit(void) 813{ 814 815 thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), 816 thread_ctor, thread_dtor, thread_init, thread_fini, 817 UMA_ALIGN_CACHE, 0); 818 ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(), 819 NULL, NULL, ksegrp_init, NULL, 820 UMA_ALIGN_CACHE, 0); 821 kse_zone = uma_zcreate("KSE", sched_sizeof_kse(), 822 NULL, NULL, kse_init, NULL, 823 UMA_ALIGN_CACHE, 0); 824 upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall), 825 NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0); 826} 827 828/* 829 * Stash an embarasingly extra thread into the zombie thread queue. 830 */ 831void 832thread_stash(struct thread *td) 833{ 834 mtx_lock_spin(&kse_zombie_lock); 835 TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq); 836 mtx_unlock_spin(&kse_zombie_lock); 837} 838 839/* 840 * Stash an embarasingly extra kse into the zombie kse queue. 841 */ 842void 843kse_stash(struct kse *ke) 844{ 845 mtx_lock_spin(&kse_zombie_lock); 846 TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq); 847 mtx_unlock_spin(&kse_zombie_lock); 848} 849 850/* 851 * Stash an embarasingly extra upcall into the zombie upcall queue. 
852 */ 853 854void 855upcall_stash(struct kse_upcall *ku) 856{ 857 mtx_lock_spin(&kse_zombie_lock); 858 TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link); 859 mtx_unlock_spin(&kse_zombie_lock); 860} 861 862/* 863 * Stash an embarasingly extra ksegrp into the zombie ksegrp queue. 864 */ 865void 866ksegrp_stash(struct ksegrp *kg) 867{ 868 mtx_lock_spin(&kse_zombie_lock); 869 TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp); 870 mtx_unlock_spin(&kse_zombie_lock); 871} 872 873/* 874 * Reap zombie kse resource. 875 */ 876void 877thread_reap(void) 878{ 879 struct thread *td_first, *td_next; 880 struct kse *ke_first, *ke_next; 881 struct ksegrp *kg_first, * kg_next; 882 struct kse_upcall *ku_first, *ku_next; 883 884 /* 885 * Don't even bother to lock if none at this instant, 886 * we really don't care about the next instant.. 887 */ 888 if ((!TAILQ_EMPTY(&zombie_threads)) 889 || (!TAILQ_EMPTY(&zombie_kses)) 890 || (!TAILQ_EMPTY(&zombie_ksegrps)) 891 || (!TAILQ_EMPTY(&zombie_upcalls))) { 892 mtx_lock_spin(&kse_zombie_lock); 893 td_first = TAILQ_FIRST(&zombie_threads); 894 ke_first = TAILQ_FIRST(&zombie_kses); 895 kg_first = TAILQ_FIRST(&zombie_ksegrps); 896 ku_first = TAILQ_FIRST(&zombie_upcalls); 897 if (td_first) 898 TAILQ_INIT(&zombie_threads); 899 if (ke_first) 900 TAILQ_INIT(&zombie_kses); 901 if (kg_first) 902 TAILQ_INIT(&zombie_ksegrps); 903 if (ku_first) 904 TAILQ_INIT(&zombie_upcalls); 905 mtx_unlock_spin(&kse_zombie_lock); 906 while (td_first) { 907 td_next = TAILQ_NEXT(td_first, td_runq); 908 if (td_first->td_ucred) 909 crfree(td_first->td_ucred); 910 thread_free(td_first); 911 td_first = td_next; 912 } 913 while (ke_first) { 914 ke_next = TAILQ_NEXT(ke_first, ke_procq); 915 kse_free(ke_first); 916 ke_first = ke_next; 917 } 918 while (kg_first) { 919 kg_next = TAILQ_NEXT(kg_first, kg_ksegrp); 920 ksegrp_free(kg_first); 921 kg_first = kg_next; 922 } 923 while (ku_first) { 924 ku_next = TAILQ_NEXT(ku_first, ku_link); 925 upcall_free(ku_first); 926 ku_first = ku_next; 927 } 928 } 929} 930 931/* 932 * Allocate a ksegrp. 933 */ 934struct ksegrp * 935ksegrp_alloc(void) 936{ 937 return (uma_zalloc(ksegrp_zone, M_WAITOK)); 938} 939 940/* 941 * Allocate a kse. 942 */ 943struct kse * 944kse_alloc(void) 945{ 946 return (uma_zalloc(kse_zone, M_WAITOK)); 947} 948 949/* 950 * Allocate a thread. 951 */ 952struct thread * 953thread_alloc(void) 954{ 955 thread_reap(); /* check if any zombies to get */ 956 return (uma_zalloc(thread_zone, M_WAITOK)); 957} 958 959/* 960 * Deallocate a ksegrp. 961 */ 962void 963ksegrp_free(struct ksegrp *td) 964{ 965 uma_zfree(ksegrp_zone, td); 966} 967 968/* 969 * Deallocate a kse. 970 */ 971void 972kse_free(struct kse *td) 973{ 974 uma_zfree(kse_zone, td); 975} 976 977/* 978 * Deallocate a thread. 979 */ 980void 981thread_free(struct thread *td) 982{ 983 984 cpu_thread_clean(td); 985 uma_zfree(thread_zone, td); 986} 987 988/* 989 * Store the thread context in the UTS's mailbox. 990 * then add the mailbox at the head of a list we are building in user space. 991 * The list is anchored in the ksegrp structure. 992 */ 993int 994thread_export_context(struct thread *td) 995{ 996 struct proc *p; 997 struct ksegrp *kg; 998 uintptr_t mbx; 999 void *addr; 1000 int error = 0, temp, sig; 1001 mcontext_t mc; 1002 1003 p = td->td_proc; 1004 kg = td->td_ksegrp; 1005 1006 /* Export the user/machine context. 
*/ 1007 get_mcontext(td, &mc, 0); 1008 addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext); 1009 error = copyout(&mc, addr, sizeof(mcontext_t)); 1010 if (error) 1011 goto bad; 1012 1013 /* Exports clock ticks in kernel mode */ 1014 addr = (caddr_t)(&td->td_mailbox->tm_sticks); 1015 temp = fuword(addr) + td->td_usticks; 1016 if (suword(addr, temp)) { 1017 error = EFAULT; 1018 goto bad; 1019 } 1020 1021 /* 1022 * Post sync signal, or process SIGKILL and SIGSTOP. 1023 * For sync signal, it is only possible when the signal is not 1024 * caught by userland or process is being debugged. 1025 */ 1026 if (td->td_flags & TDF_NEEDSIGCHK) { 1027 mtx_lock_spin(&sched_lock); 1028 td->td_flags &= ~TDF_NEEDSIGCHK; 1029 mtx_unlock_spin(&sched_lock); 1030 PROC_LOCK(p); 1031 mtx_lock(&p->p_sigacts->ps_mtx); 1032 while ((sig = cursig(td)) != 0) 1033 postsig(sig); 1034 mtx_unlock(&p->p_sigacts->ps_mtx); 1035 PROC_UNLOCK(p); 1036 } 1037 1038 /* Get address in latest mbox of list pointer */ 1039 addr = (void *)(&td->td_mailbox->tm_next); 1040 /* 1041 * Put the saved address of the previous first 1042 * entry into this one 1043 */ 1044 for (;;) { 1045 mbx = (uintptr_t)kg->kg_completed; 1046 if (suword(addr, mbx)) { 1047 error = EFAULT; 1048 goto bad; 1049 } 1050 PROC_LOCK(p); 1051 if (mbx == (uintptr_t)kg->kg_completed) { 1052 kg->kg_completed = td->td_mailbox; 1053 /* 1054 * The thread context may be taken away by 1055 * other upcall threads when we unlock 1056 * process lock. it's no longer valid to 1057 * use it again in any other places. 1058 */ 1059 td->td_mailbox = NULL; 1060 PROC_UNLOCK(p); 1061 break; 1062 } 1063 PROC_UNLOCK(p); 1064 } 1065 td->td_usticks = 0; 1066 return (0); 1067 1068bad: 1069 PROC_LOCK(p); 1070 psignal(p, SIGSEGV); 1071 PROC_UNLOCK(p); 1072 /* The mailbox is bad, don't use it */ 1073 td->td_mailbox = NULL; 1074 td->td_usticks = 0; 1075 return (error); 1076} 1077 1078/* 1079 * Take the list of completed mailboxes for this KSEGRP and put them on this 1080 * upcall's mailbox as it's the next one going up. 1081 */ 1082static int 1083thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku) 1084{ 1085 struct proc *p = kg->kg_proc; 1086 void *addr; 1087 uintptr_t mbx; 1088 1089 addr = (void *)(&ku->ku_mailbox->km_completed); 1090 for (;;) { 1091 mbx = (uintptr_t)kg->kg_completed; 1092 if (suword(addr, mbx)) { 1093 PROC_LOCK(p); 1094 psignal(p, SIGSEGV); 1095 PROC_UNLOCK(p); 1096 return (EFAULT); 1097 } 1098 PROC_LOCK(p); 1099 if (mbx == (uintptr_t)kg->kg_completed) { 1100 kg->kg_completed = NULL; 1101 PROC_UNLOCK(p); 1102 break; 1103 } 1104 PROC_UNLOCK(p); 1105 } 1106 return (0); 1107} 1108 1109/* 1110 * This function should be called at statclock interrupt time 1111 */ 1112int 1113thread_statclock(int user) 1114{ 1115 struct thread *td = curthread; 1116 struct ksegrp *kg = td->td_ksegrp; 1117 1118 if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA)) 1119 return (0); 1120 if (user) { 1121 /* Current always do via ast() */ 1122 mtx_lock_spin(&sched_lock); 1123 td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING); 1124 mtx_unlock_spin(&sched_lock); 1125 td->td_uuticks++; 1126 } else { 1127 if (td->td_mailbox != NULL) 1128 td->td_usticks++; 1129 else { 1130 /* XXXKSE 1131 * We will call thread_user_enter() for every 1132 * kernel entry in future, so if the thread mailbox 1133 * is NULL, it must be a UTS kernel, don't account 1134 * clock ticks for it. 
1135 */ 1136 } 1137 } 1138 return (0); 1139} 1140 1141/* 1142 * Export state clock ticks for userland 1143 */ 1144static int 1145thread_update_usr_ticks(struct thread *td, int user) 1146{ 1147 struct proc *p = td->td_proc; 1148 struct kse_thr_mailbox *tmbx; 1149 struct kse_upcall *ku; 1150 struct ksegrp *kg; 1151 caddr_t addr; 1152 uint uticks; 1153 1154 if ((ku = td->td_upcall) == NULL) 1155 return (-1); 1156 1157 tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); 1158 if ((tmbx == NULL) || (tmbx == (void *)-1)) 1159 return (-1); 1160 if (user) { 1161 uticks = td->td_uuticks; 1162 td->td_uuticks = 0; 1163 addr = (caddr_t)&tmbx->tm_uticks; 1164 } else { 1165 uticks = td->td_usticks; 1166 td->td_usticks = 0; 1167 addr = (caddr_t)&tmbx->tm_sticks; 1168 } 1169 if (uticks) { 1170 if (suword(addr, uticks+fuword(addr))) { 1171 PROC_LOCK(p); 1172 psignal(p, SIGSEGV); 1173 PROC_UNLOCK(p); 1174 return (-2); 1175 } 1176 } 1177 kg = td->td_ksegrp; 1178 if (kg->kg_upquantum && ticks >= kg->kg_nextupcall) { 1179 mtx_lock_spin(&sched_lock); 1180 td->td_upcall->ku_flags |= KUF_DOUPCALL; 1181 mtx_unlock_spin(&sched_lock); 1182 } 1183 return (0); 1184} 1185 1186/* 1187 * Discard the current thread and exit from its context. 1188 * 1189 * Because we can't free a thread while we're operating under its context, 1190 * push the current thread into our CPU's deadthread holder. This means 1191 * we needn't worry about someone else grabbing our context before we 1192 * do a cpu_throw(). 1193 */ 1194void 1195thread_exit(void) 1196{ 1197 struct thread *td; 1198 struct kse *ke; 1199 struct proc *p; 1200 struct ksegrp *kg; 1201 1202 td = curthread; 1203 kg = td->td_ksegrp; 1204 p = td->td_proc; 1205 ke = td->td_kse; 1206 1207 mtx_assert(&sched_lock, MA_OWNED); 1208 KASSERT(p != NULL, ("thread exiting without a process")); 1209 KASSERT(ke != NULL, ("thread exiting without a kse")); 1210 KASSERT(kg != NULL, ("thread exiting without a kse group")); 1211 PROC_LOCK_ASSERT(p, MA_OWNED); 1212 CTR1(KTR_PROC, "thread_exit: thread %p", td); 1213 KASSERT(!mtx_owned(&Giant), ("dying thread owns giant")); 1214 1215 if (td->td_standin != NULL) { 1216 thread_stash(td->td_standin); 1217 td->td_standin = NULL; 1218 } 1219 1220 cpu_thread_exit(td); /* XXXSMP */ 1221 1222 /* 1223 * The last thread is left attached to the process 1224 * So that the whole bundle gets recycled. Skip 1225 * all this stuff. 1226 */ 1227 if (p->p_numthreads > 1) { 1228 thread_unlink(td); 1229 if (p->p_maxthrwaits) 1230 wakeup(&p->p_numthreads); 1231 /* 1232 * The test below is NOT true if we are the 1233 * sole exiting thread. P_STOPPED_SNGL is unset 1234 * in exit1() after it is the only survivor. 1235 */ 1236 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { 1237 if (p->p_numthreads == p->p_suspcount) { 1238 thread_unsuspend_one(p->p_singlethread); 1239 } 1240 } 1241 1242 /* 1243 * Because each upcall structure has an owner thread, 1244 * owner thread exits only when process is in exiting 1245 * state, so upcall to userland is no longer needed, 1246 * deleting upcall structure is safe here. 1247 * So when all threads in a group is exited, all upcalls 1248 * in the group should be automatically freed. 1249 */ 1250 if (td->td_upcall) 1251 upcall_remove(td); 1252 1253 ke->ke_state = KES_UNQUEUED; 1254 ke->ke_thread = NULL; 1255 /* 1256 * Decide what to do with the KSE attached to this thread. 
 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse = NULL;
		td->td_state = TDS_INACTIVE;
#if 0
		td->td_proc = NULL;
#endif
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	/* XXX Shouldn't cpu_throw() here. */
	mtx_assert(&sched_lock, MA_OWNED);
#if !defined(__alpha__) && !defined(__powerpc__)
	cpu_throw(td, choosethread());
#else
	cpu_throw();
#endif
	panic("I'm a teapot!");
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock are not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but with no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;
	td->td_flags = 0;
	td->td_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
}

/*
 * Purge a ksegrp's resources.  When a ksegrp is preparing to
 * exit, it calls this function.
 */
static void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
		    ("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
	    ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	    ("%s: ksegrp still has %d upcall data",
	    __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resources.  When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
1377 */ 1378static void 1379kse_purge(struct proc *p, struct thread *td) 1380{ 1381 struct ksegrp *kg; 1382 struct kse *ke; 1383 1384 KASSERT(p->p_numthreads == 1, ("bad thread number")); 1385 while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) { 1386 TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp); 1387 p->p_numksegrps--; 1388 /* 1389 * There is no ownership for KSE, after all threads 1390 * in the group exited, it is possible that some KSEs 1391 * were left in idle queue, gc them now. 1392 */ 1393 while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) { 1394 KASSERT(ke->ke_state == KES_IDLE, 1395 ("%s: wrong idle KSE state", __func__)); 1396 TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist); 1397 kg->kg_idle_kses--; 1398 TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist); 1399 kg->kg_kses--; 1400 kse_stash(ke); 1401 } 1402 KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) || 1403 ((kg->kg_kses == 1) && (kg == td->td_ksegrp)), 1404 ("ksegrp has wrong kg_kses: %d", kg->kg_kses)); 1405 KASSERT((kg->kg_numupcalls == 0), 1406 ("%s: ksegrp still has %d upcall datas", 1407 __func__, kg->kg_numupcalls)); 1408 1409 if (kg != td->td_ksegrp) 1410 ksegrp_stash(kg); 1411 } 1412 TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp); 1413 p->p_numksegrps++; 1414} 1415 1416/* 1417 * This function is intended to be used to initialize a spare thread 1418 * for upcall. Initialize thread's large data area outside sched_lock 1419 * for thread_schedule_upcall(). 1420 */ 1421void 1422thread_alloc_spare(struct thread *td, struct thread *spare) 1423{ 1424 if (td->td_standin) 1425 return; 1426 if (spare == NULL) 1427 spare = thread_alloc(); 1428 td->td_standin = spare; 1429 bzero(&spare->td_startzero, 1430 (unsigned)RANGEOF(struct thread, td_startzero, td_endzero)); 1431 spare->td_proc = td->td_proc; 1432 spare->td_ucred = crhold(td->td_ucred); 1433} 1434 1435/* 1436 * Create a thread and schedule it for upcall on the KSE given. 1437 * Use our thread's standin so that we don't have to allocate one. 1438 */ 1439struct thread * 1440thread_schedule_upcall(struct thread *td, struct kse_upcall *ku) 1441{ 1442 struct thread *td2; 1443 1444 mtx_assert(&sched_lock, MA_OWNED); 1445 1446 /* 1447 * Schedule an upcall thread on specified kse_upcall, 1448 * the kse_upcall must be free. 1449 * td must have a spare thread. 1450 */ 1451 KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__)); 1452 if ((td2 = td->td_standin) != NULL) { 1453 td->td_standin = NULL; 1454 } else { 1455 panic("no reserve thread when scheduling an upcall"); 1456 return (NULL); 1457 } 1458 CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)", 1459 td2, td->td_proc->p_pid, td->td_proc->p_comm); 1460 bcopy(&td->td_startcopy, &td2->td_startcopy, 1461 (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy)); 1462 thread_link(td2, ku->ku_ksegrp); 1463 /* inherit blocked thread's context */ 1464 cpu_set_upcall(td2, td); 1465 /* Let the new thread become owner of the upcall */ 1466 ku->ku_owner = td2; 1467 td2->td_upcall = ku; 1468 td2->td_flags = TDF_SA; 1469 td2->td_pflags = TDP_UPCALLING; 1470 td2->td_kse = NULL; 1471 td2->td_state = TDS_CAN_RUN; 1472 td2->td_inhibitors = 0; 1473 SIGFILLSET(td2->td_sigmask); 1474 SIG_CANTMASK(td2->td_sigmask); 1475 return (td2); /* bogus.. should be a void function */ 1476} 1477 1478/* 1479 * It is only used when thread generated a trap and process is being 1480 * debugged. 
1481 */ 1482void 1483thread_signal_add(struct thread *td, int sig) 1484{ 1485 struct proc *p; 1486 siginfo_t siginfo; 1487 struct sigacts *ps; 1488 int error; 1489 1490 p = td->td_proc; 1491 PROC_LOCK_ASSERT(p, MA_OWNED); 1492 ps = p->p_sigacts; 1493 mtx_assert(&ps->ps_mtx, MA_OWNED); 1494 1495 thread_siginfo(sig, 0, &siginfo); 1496 mtx_unlock(&ps->ps_mtx); 1497 PROC_UNLOCK(p); 1498 error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo)); 1499 if (error) { 1500 PROC_LOCK(p); 1501 sigexit(td, SIGILL); 1502 } 1503 PROC_LOCK(p); 1504 SIGADDSET(td->td_sigmask, sig); 1505 mtx_lock(&ps->ps_mtx); 1506} 1507 1508void 1509thread_switchout(struct thread *td) 1510{ 1511 struct kse_upcall *ku; 1512 struct thread *td2; 1513 1514 mtx_assert(&sched_lock, MA_OWNED); 1515 1516 /* 1517 * If the outgoing thread is in threaded group and has never 1518 * scheduled an upcall, decide whether this is a short 1519 * or long term event and thus whether or not to schedule 1520 * an upcall. 1521 * If it is a short term event, just suspend it in 1522 * a way that takes its KSE with it. 1523 * Select the events for which we want to schedule upcalls. 1524 * For now it's just sleep. 1525 * XXXKSE eventually almost any inhibition could do. 1526 */ 1527 if (TD_CAN_UNBIND(td) && (td->td_standin) && TD_ON_SLEEPQ(td)) { 1528 /* 1529 * Release ownership of upcall, and schedule an upcall 1530 * thread, this new upcall thread becomes the owner of 1531 * the upcall structure. 1532 */ 1533 ku = td->td_upcall; 1534 ku->ku_owner = NULL; 1535 td->td_upcall = NULL; 1536 td->td_flags &= ~TDF_CAN_UNBIND; 1537 td2 = thread_schedule_upcall(td, ku); 1538 setrunqueue(td2); 1539 } 1540} 1541 1542/* 1543 * Setup done on the thread when it enters the kernel. 1544 * XXXKSE Presently only for syscalls but eventually all kernel entries. 1545 */ 1546void 1547thread_user_enter(struct proc *p, struct thread *td) 1548{ 1549 struct ksegrp *kg; 1550 struct kse_upcall *ku; 1551 struct kse_thr_mailbox *tmbx; 1552 1553 kg = td->td_ksegrp; 1554 1555 /* 1556 * First check that we shouldn't just abort. 1557 * But check if we are the single thread first! 1558 */ 1559 if (p->p_flag & P_SINGLE_EXIT) { 1560 PROC_LOCK(p); 1561 mtx_lock_spin(&sched_lock); 1562 thread_stopped(p); 1563 thread_exit(); 1564 /* NOTREACHED */ 1565 } 1566 1567 /* 1568 * If we are doing a syscall in a KSE environment, 1569 * note where our mailbox is. There is always the 1570 * possibility that we could do this lazily (in kse_reassign()), 1571 * but for now do it every time. 1572 */ 1573 kg = td->td_ksegrp; 1574 if (td->td_flags & TDF_SA) { 1575 ku = td->td_upcall; 1576 KASSERT(ku, ("%s: no upcall owned", __func__)); 1577 KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__)); 1578 KASSERT(!TD_CAN_UNBIND(td), ("%s: can unbind", __func__)); 1579 ku->ku_mflags = fuword((void *)&ku->ku_mailbox->km_flags); 1580 tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread); 1581 if ((tmbx == NULL) || (tmbx == (void *)-1)) { 1582 td->td_mailbox = NULL; 1583 } else { 1584 td->td_mailbox = tmbx; 1585 if (td->td_standin == NULL) 1586 thread_alloc_spare(td, NULL); 1587 mtx_lock_spin(&sched_lock); 1588 if (ku->ku_mflags & KMF_NOUPCALL) 1589 td->td_flags &= ~TDF_CAN_UNBIND; 1590 else 1591 td->td_flags |= TDF_CAN_UNBIND; 1592 mtx_unlock_spin(&sched_lock); 1593 } 1594 } 1595} 1596 1597/* 1598 * The extra work we go through if we are a threaded process when we 1599 * return to userland. 
1600 * 1601 * If we are a KSE process and returning to user mode, check for 1602 * extra work to do before we return (e.g. for more syscalls 1603 * to complete first). If we were in a critical section, we should 1604 * just return to let it finish. Same if we were in the UTS (in 1605 * which case the mailbox's context's busy indicator will be set). 1606 * The only traps we suport will have set the mailbox. 1607 * We will clear it here. 1608 */ 1609int 1610thread_userret(struct thread *td, struct trapframe *frame) 1611{ 1612 int error = 0, upcalls, uts_crit; 1613 struct kse_upcall *ku; 1614 struct ksegrp *kg, *kg2; 1615 struct proc *p; 1616 struct timespec ts; 1617 1618 p = td->td_proc; 1619 kg = td->td_ksegrp; 1620 ku = td->td_upcall; 1621 1622 /* Nothing to do with bound thread */ 1623 if (!(td->td_flags & TDF_SA)) 1624 return (0); 1625 1626 /* 1627 * Stat clock interrupt hit in userland, it 1628 * is returning from interrupt, charge thread's 1629 * userland time for UTS. 1630 */ 1631 if (td->td_flags & TDF_USTATCLOCK) { 1632 thread_update_usr_ticks(td, 1); 1633 mtx_lock_spin(&sched_lock); 1634 td->td_flags &= ~TDF_USTATCLOCK; 1635 mtx_unlock_spin(&sched_lock); 1636 if (kg->kg_completed || 1637 (td->td_upcall->ku_flags & KUF_DOUPCALL)) 1638 thread_user_enter(p, td); 1639 } 1640 1641 uts_crit = (td->td_mailbox == NULL); 1642 /* 1643 * Optimisation: 1644 * This thread has not started any upcall. 1645 * If there is no work to report other than ourself, 1646 * then it can return direct to userland. 1647 */ 1648 if (TD_CAN_UNBIND(td)) { 1649 mtx_lock_spin(&sched_lock); 1650 td->td_flags &= ~TDF_CAN_UNBIND; 1651 if ((td->td_flags & TDF_NEEDSIGCHK) == 0 && 1652 (kg->kg_completed == NULL) && 1653 (ku->ku_flags & KUF_DOUPCALL) == 0 && 1654 (kg->kg_upquantum && ticks < kg->kg_nextupcall)) { 1655 mtx_unlock_spin(&sched_lock); 1656 thread_update_usr_ticks(td, 0); 1657 nanotime(&ts); 1658 error = copyout(&ts, 1659 (caddr_t)&ku->ku_mailbox->km_timeofday, 1660 sizeof(ts)); 1661 td->td_mailbox = 0; 1662 ku->ku_mflags = 0; 1663 if (error) 1664 goto out; 1665 return (0); 1666 } 1667 mtx_unlock_spin(&sched_lock); 1668 error = thread_export_context(td); 1669 if (error) { 1670 /* 1671 * Failing to do the KSE operation just defaults 1672 * back to synchonous operation, so just return from 1673 * the syscall. 1674 */ 1675 goto out; 1676 } 1677 /* 1678 * There is something to report, and we own an upcall 1679 * strucuture, we can go to userland. 1680 * Turn ourself into an upcall thread. 1681 */ 1682 td->td_pflags |= TDP_UPCALLING; 1683 } else if (td->td_mailbox && (ku == NULL)) { 1684 /* 1685 * Because we are exiting, SIGKILL and SIGSTOP shouldn't 1686 * be posted to us anymore, otherwise they will be lost. 1687 */ 1688 mtx_lock_spin(&sched_lock); 1689 td->td_flags |= TDF_NOSIGPOST; 1690 mtx_unlock_spin(&sched_lock); 1691 error = thread_export_context(td); 1692 /* possibly upcall with error? */ 1693 PROC_LOCK(p); 1694 /* 1695 * There are upcall threads waiting for 1696 * work to do, wake one of them up. 1697 * XXXKSE Maybe wake all of them up. 
1698 */ 1699 if (!error && kg->kg_upsleeps) 1700 wakeup_one(&kg->kg_completed); 1701 mtx_lock_spin(&sched_lock); 1702 thread_stopped(p); 1703 thread_exit(); 1704 /* NOTREACHED */ 1705 } 1706 1707 KASSERT(ku != NULL, ("upcall is NULL\n")); 1708 KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind")); 1709 1710 if (p->p_numthreads > max_threads_per_proc) { 1711 max_threads_hits++; 1712 PROC_LOCK(p); 1713 mtx_lock_spin(&sched_lock); 1714 p->p_maxthrwaits++; 1715 while (p->p_numthreads > max_threads_per_proc) { 1716 upcalls = 0; 1717 FOREACH_KSEGRP_IN_PROC(p, kg2) { 1718 if (kg2->kg_numupcalls == 0) 1719 upcalls++; 1720 else 1721 upcalls += kg2->kg_numupcalls; 1722 } 1723 if (upcalls >= max_threads_per_proc) 1724 break; 1725 mtx_unlock_spin(&sched_lock); 1726 if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH, 1727 "maxthreads", NULL)) { 1728 mtx_lock_spin(&sched_lock); 1729 break; 1730 } else { 1731 mtx_lock_spin(&sched_lock); 1732 } 1733 } 1734 p->p_maxthrwaits--; 1735 mtx_unlock_spin(&sched_lock); 1736 PROC_UNLOCK(p); 1737 } 1738 1739 if (td->td_pflags & TDP_UPCALLING) { 1740 uts_crit = 0; 1741 kg->kg_nextupcall = ticks+kg->kg_upquantum; 1742 /* 1743 * There is no more work to do and we are going to ride 1744 * this thread up to userland as an upcall. 1745 * Do the last parts of the setup needed for the upcall. 1746 */ 1747 CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)", 1748 td, td->td_proc->p_pid, td->td_proc->p_comm); 1749 1750 td->td_pflags &= ~TDP_UPCALLING; 1751 if (ku->ku_flags & KUF_DOUPCALL) { 1752 mtx_lock_spin(&sched_lock); 1753 ku->ku_flags &= ~KUF_DOUPCALL; 1754 mtx_unlock_spin(&sched_lock); 1755 } 1756 /* 1757 * Set user context to the UTS 1758 */ 1759 if (!(ku->ku_mflags & KMF_NOUPCALL)) { 1760 cpu_set_upcall_kse(td, ku); 1761 error = suword(&ku->ku_mailbox->km_curthread, 0); 1762 if (error) 1763 goto out; 1764 } 1765 1766 /* 1767 * Unhook the list of completed threads. 1768 * anything that completes after this gets to 1769 * come in next time. 1770 * Put the list of completed thread mailboxes on 1771 * this KSE's mailbox. 1772 */ 1773 if (!(ku->ku_mflags & KMF_NOCOMPLETED) && 1774 (error = thread_link_mboxes(kg, ku)) != 0) 1775 goto out; 1776 } 1777 if (!uts_crit) { 1778 nanotime(&ts); 1779 error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts)); 1780 } 1781 1782out: 1783 if (error) { 1784 /* 1785 * Things are going to be so screwed we should just kill 1786 * the process. 1787 * how do we do that? 1788 */ 1789 PROC_LOCK(td->td_proc); 1790 psignal(td->td_proc, SIGSEGV); 1791 PROC_UNLOCK(td->td_proc); 1792 } else { 1793 /* 1794 * Optimisation: 1795 * Ensure that we have a spare thread available, 1796 * for when we re-enter the kernel. 1797 */ 1798 if (td->td_standin == NULL) 1799 thread_alloc_spare(td, NULL); 1800 } 1801 1802 ku->ku_mflags = 0; 1803 /* 1804 * Clear thread mailbox first, then clear system tick count. 1805 * The order is important because thread_statclock() use 1806 * mailbox pointer to see if it is an userland thread or 1807 * an UTS kernel thread. 1808 */ 1809 td->td_mailbox = NULL; 1810 td->td_usticks = 0; 1811 return (error); /* go sync */ 1812} 1813 1814/* 1815 * Enforce single-threading. 1816 * 1817 * Returns 1 if the caller must abort (another thread is waiting to 1818 * exit the process or similar). Process is locked! 1819 * Returns 0 when you are successfully the only thread running. 1820 * A process has successfully single threaded in the suspend mode when 1821 * There are no threads in user mode. 
Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may,
 * however, be accelerated in reaching the user boundary since we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_SA) == 0 && p->p_numthreads == 1)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * Maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads... was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1)
			break;

		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall)
			upcall_remove(td);
		kse_purge(p, td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function whether it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0 | return_instead != 0
 *---------------+---------------------+---------------------
 *       0       | returns 0           | returns 0 or 1
 *               | when ST ends        | immediately
 *---------------+---------------------+---------------------
 *       1       | thread exits        | returns 1
 *               |                     | immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is
			 * single-threading.  The single threader need not
			 * stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			if (p->p_flag & P_SA)
				thread_exit();
			else
				thr_exit1();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		DROP_GIANT();
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent.  We
	 * want to look like we have two Inhibitors.
	 * May already be set... doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request.  Now that we've downgraded to
		 * single-threaded, let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process.  The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}
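/*
 * Illustrative sketch (kept under "#if 0", not compiled): one way a
 * userland UTS might drive the KSE syscalls implemented above.  This is an
 * assumption-laden example, not part of the kernel interface contract: the
 * uts_* names and UTS_STACK_SIZE are hypothetical, only the mailbox fields
 * this file itself reads (km_func, km_stack, km_flags, km_quantum,
 * km_curthread) are filled in, the upcall function is assumed to take the
 * mailbox pointer, and any further fields required by <sys/kse.h> as well
 * as all error handling are omitted.
 */
#if 0
#include <sys/types.h>
#include <sys/kse.h>
#include <stdlib.h>

#define	UTS_STACK_SIZE	(32 * 1024)	/* arbitrary size for the example */

static struct kse_mailbox uts_mbox;

/* Upcall entry point; the kernel calls back here on the km_stack stack. */
static void
uts_entry(struct kse_mailbox *mbx)
{
	/*
	 * A real UTS would walk mbx->km_completed for finished thread
	 * mailboxes and pick a user thread to resume.  With nothing to
	 * run, park in the kernel; kse_wakeup() or a pending event will
	 * bring us back here as a fresh upcall.
	 */
	for (;;)
		kse_release(NULL);
}

static int
uts_start(void)
{

	uts_mbox.km_func = uts_entry;
	uts_mbox.km_stack.ss_sp = malloc(UTS_STACK_SIZE);
	uts_mbox.km_stack.ss_size = UTS_STACK_SIZE;
	uts_mbox.km_quantum = 0;	/* no forced upcall quantum */
	uts_mbox.km_flags = 0;		/* SA mode; KMF_BOUND would be 1:1 */
	uts_mbox.km_curthread = NULL;	/* no user thread context yet */

	/* First call: use the current KSE, no new group (see kse_create). */
	return (kse_create(&uts_mbox, 0));
}
#endif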