kern_thread.c revision 111129
/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_thread.c 111129 2003-02-19 13:40:24Z davidxu $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/jail.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static uma_zone_t upcall_zone;

/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");

static int virtual_cpu;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static void kse_purge(struct proc *p, struct thread *td);
static void kse_purge_group(struct thread *td);
static int thread_update_usr_ticks(struct thread *td);
static int thread_update_sys_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td, struct thread *spare);

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;
	int def_val;

#ifdef SMP
	def_val = mp_ncpus;
#else
	def_val = 1;
#endif
	if (virtual_cpu == 0)
		new_val = def_val;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_RUNNING:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	mtx_lock(&Giant);
	pmap_new_thread(td, 0);
	mtx_unlock(&Giant);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}

/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
{
	struct kse *ke;

	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
}

/*
 * KSE is linked into kse group.
 */
void
kse_link(struct kse *ke, struct ksegrp *kg)
{
	struct proc *p = kg->kg_proc;

	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	kg->kg_kses++;
	ke->ke_state = KES_UNQUEUED;
	ke->ke_proc = p;
	ke->ke_ksegrp = kg;
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags = 0;
}

void
kse_unlink(struct kse *ke)
{
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	kg = ke->ke_ksegrp;
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		kg->kg_idle_kses--;
	}
	if (--kg->kg_kses == 0)
		ksegrp_unlink(kg);
	/*
	 * Aggregate stats from the KSE
	 */
	kse_stash(ke);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{

	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* all idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structure in ksegrp */
	kg->kg_proc = p;
	/*
	 * the following counters are in the -zero- section
	 * and may not need clearing
	 */
	kg->kg_numthreads = 0;
	kg->kg_runnable = 0;
	kg->kg_kses = 0;
	kg->kg_runq_kses = 0;	/* XXXKSE change name */
	kg->kg_idle_kses = 0;
	kg->kg_numupcalls = 0;
	/* link it in now that it's consistent */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_kses == 0), ("ksegrp_unlink: residual kses"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
	/*
	 * Aggregate stats from the KSE
	 */
	ksegrp_stash(kg);
}

struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK);
	bzero(ku, sizeof(*ku));
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}

void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}

void
upcall_remove(struct thread *td)
{

	if (td->td_upcall) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = 0;
	}
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg,
	    struct kse *ke, struct thread *td)
{

	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	kse_link(ke, kg);
	thread_link(td, kg);
}

/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct proc *p;
	struct thread *td2;

	p = td->td_proc;
	if (!(p->p_flag & P_KSES) || (uap->tmbx == NULL))
		return (EINVAL);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
					cv_abort(td2);
				else
					abortsleep(td2);
			}
			mtx_unlock_spin(&sched_lock);
			return (0);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (ESRCH);
}

/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse *ke;

	p = td->td_proc;
	/*
	 * Only UTS can call the syscall and current group
	 * should be a threaded group.
	 */
	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
		return (EINVAL);
	KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));

	kg = td->td_ksegrp;
	/* Serialize removing upcall */
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if ((kg->kg_numupcalls == 1) && (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ke = td->td_kse;
	upcall_remove(td);
	if (p->p_numthreads == 1) {
		kse_purge(p, td);
		p->p_flag &= ~P_KSES;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		if (kg->kg_numthreads == 1) {	/* Shutdown a group */
			kse_purge_group(td);
			ke->ke_flags |= KEF_EXIT;
		}
		thread_exit();
		/* NOTREACHED */
	}
	return (0);
}

/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall.  Only error cases return.
 */
/*
struct kse_release_args {
	register_t dummy;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;

	p = td->td_proc;
	kg = td->td_ksegrp;
	/*
	 * Only UTS can call the syscall and current group
	 * should be a threaded group.
	 */
	if ((td->td_mailbox != NULL) || (td->td_ksegrp->kg_numupcalls == 0))
		return (EINVAL);
	KASSERT((td->td_upcall != NULL), ("%s: not own an upcall", __func__));

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	/* Change OURSELF to become an upcall. */
	td->td_flags = TDF_UPCALLING;
	if (p->p_sflag & PS_NEEDSIGCHK)
		td->td_flags |= TDF_ASTPENDING;
	if ((td->td_upcall->ku_flags & KUF_DOUPCALL) == 0 &&
	    (kg->kg_completed == NULL)) {
		kg->kg_upsleeps++;
		mtx_unlock_spin(&sched_lock);
		msleep(&kg->kg_completed, &p->p_mtx, PPAUSE|PCATCH, "ksepause",
		    NULL);
		kg->kg_upsleeps--;
		PROC_UNLOCK(p);
	} else {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}
	return (0);
}

/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
		return (EINVAL);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			wakeup_one(&kg->kg_completed);
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku) {
		if ((td2 = ku->ku_owner) == NULL) {
			panic("%s: no owner", __func__);
		} else if (TD_ON_SLEEPQ(td2) &&
		    (td2->td_wchan == &kg->kg_completed)) {
			abortsleep(td2);
		} else {
			ku->ku_flags |= KUF_DOUPCALL;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (ESRCH);
}

/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall.
 * All other situations: allocate max new KSEs and schedule an upcall.
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct kse *newke;
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus;

	p = td->td_proc;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	/* Too bad the kernel doesn't always have a cpu counter!? */
#ifdef SMP
	ncpus = mp_ncpus;
#else
	ncpus = 1;
#endif
	if (thread_debug && virtual_cpu != 0)
		ncpus = virtual_cpu;

	/* Easier to just set it than to test and set */
	p->p_flag |= P_KSES;
	kg = td->td_ksegrp;
	if (uap->newgroup) {
		/* Have race condition but it is cheap */
		if (p->p_numksegrps >= max_groups_per_proc)
			return (EPROCLIM);
		/*
		 * If we want a new KSEGRP it doesn't matter whether
		 * we have already fired up KSE mode before or not.
		 * We put the process in KSE mode and create a new KSEGRP.
		 */
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
		    kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
		mtx_lock_spin(&sched_lock);
		ksegrp_link(newkg, p);
		if (p->p_numksegrps >= max_groups_per_proc) {
			ksegrp_unlink(newkg);
			mtx_unlock_spin(&sched_lock);
			return (EPROCLIM);
		}
		mtx_unlock_spin(&sched_lock);
	} else {
		newkg = kg;
	}

	/*
	 * Creating more upcalls than the number of physical cpus does
	 * not help performance.
	 */
	if (newkg->kg_numupcalls >= ncpus)
		return (EPROCLIM);

	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize KSE group, optimized for MP.
		 * Create as many KSEs as there are physical cpus; this
		 * increases concurrency in the kernel even if userland
		 * is not MP safe and can only run on a single CPU (true
		 * for early versions of libpthread).
		 * In an ideal world, every physical cpu should execute
		 * a thread.  If there are enough KSEs, threads in the
		 * kernel can be executed in parallel on different cpus
		 * at full speed; kernel concurrency shouldn't be
		 * restricted by the number of upcalls userland provides.
		 * Adding more upcall structures only increases
		 * concurrency in userland.
		 * The highest performance configuration is:
		 * N kses = N upcalls = N physical cpus
		 */
		while (newkg->kg_kses < ncpus) {
			newke = kse_alloc();
			bzero(&newke->ke_startzero, RANGEOF(struct kse,
			    ke_startzero, ke_endzero));
#if 0
			mtx_lock_spin(&sched_lock);
			bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
			    RANGEOF(struct kse, ke_startcopy, ke_endcopy));
			mtx_unlock_spin(&sched_lock);
#endif
			mtx_lock_spin(&sched_lock);
			kse_link(newke, newkg);
			/* Add engine */
			kse_reassign(newke);
			mtx_unlock_spin(&sched_lock);
		}
	}
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/* For the first call this may not have been set */
	if (td->td_standin == NULL)
		thread_alloc_spare(td, NULL);

	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		upcall_free(newku);
		mtx_unlock_spin(&sched_lock);
		return (EPROCLIM);
	}
	upcall_link(newku, newkg);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread yet,
		 * create an initial upcall thread to own it.
		 */
		thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't have an upcall structure
		 * yet, just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
	get_mcontext(td, &uc->uc_mcontext);
#endif
	uc->uc_sigmask = td->td_proc->p_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
	int ret;

/*
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
 */
#ifdef __i386__
	ret = set_mcontext(td, &uc->uc_mcontext);
#else
	ret = ENOSYS;
#endif
	if (ret == 0) {
		SIG_CANTMASK(uc->uc_sigmask);
		PROC_LOCK(td->td_proc);
		td->td_proc->p_sigmask = uc->uc_sigmask;
		PROC_UNLOCK(td->td_proc);
	}
	return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

#ifndef __ia64__
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
#else
	/*
	 * XXX the ia64 kstack allocator is really lame and is at the mercy
	 * of contigmalloc().  This hackery is to pre-construct a whole
	 * pile of thread structures with associated kernel stacks early
	 * in the system startup while contigmalloc() still works.  Once we
	 * have them, keep them.  Sigh.
	 */
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	uma_prealloc(thread_zone, 512);	/* XXX arbitrary */
#endif
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
	    UMA_ALIGN_CACHE, 0);
	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra kse into the zombie kse queue.
 */
void
kse_stash(struct kse *ke)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resource.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))
	    || (!TAILQ_EMPTY(&zombie_upcalls))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (ke_first)
			TAILQ_INIT(&zombie_kses);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (ke_first) {
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kse_free(ke_first);
			ke_first = ke_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap();	/* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *td)
{
	uma_zfree(ksegrp_zone, td);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *td)
{
	uma_zfree(kse_zone, td);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error, temp;
	ucontext_t uc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Export the user/machine context. */
	addr = (void *)(&td->td_mailbox->tm_context);
	error = copyin(addr, &uc, sizeof(ucontext_t));
	if (error)
		goto bad;

	thread_getcontext(td, &uc);
	error = copyout(&uc, addr, sizeof(ucontext_t));
	if (error)
		goto bad;

	/* Export clock ticks spent in kernel mode */
	addr = (caddr_t)(&td->td_mailbox->tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp)) {
		error = EFAULT;
		goto bad;
	}

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock.  It's no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	/* The mailbox is bad, don't use it */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);
}

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		/* XXXKSE could use atomic CMPXCH here */
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * This function should be called at statclock interrupt time
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (td->td_ksegrp->kg_numupcalls == 0)
		return (-1);
	if (user) {
		/* Currently always done via ast() */
		td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
		td->td_uuticks++;
	} else {
		if (td->td_mailbox != NULL)
			td->td_usticks++;
		else {
			/* XXXKSE
			 * We will call thread_user_enter() for every
			 * kernel entry in future, so if the thread mailbox
			 * is NULL, it must be a UTS kernel thread; don't
			 * account clock ticks for it.
			 */
		}
	}
	return (0);
}

/*
 * Export user mode state clock ticks
 */
static int
thread_update_usr_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct kse_thr_mailbox *tmbx;
	struct kse_upcall *ku;
	caddr_t addr;
	uint uticks;

	if ((ku = td->td_upcall) == NULL)
		return (-1);

	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1))
		return (-1);
	uticks = td->td_uuticks;
	td->td_uuticks = 0;
	if (uticks) {
		addr = (caddr_t)&tmbx->tm_uticks;
		uticks += fuword(addr);
		if (suword(addr, uticks)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (-2);
		}
	}
	return (0);
}

/*
 * Export kernel mode state clock ticks
 */
static int
thread_update_sys_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	caddr_t addr;
	int sticks;

	if (td->td_mailbox == NULL)
		return (-1);
	if (td->td_usticks == 0)
		return (0);
	addr = (caddr_t)&td->td_mailbox->tm_sticks;
	sticks = fuword(addr);
	sticks += td->td_usticks;
	td->td_usticks = 0;
	if (suword(addr, sticks)) {
		PROC_LOCK(p);
		psignal(p, SIGSEGV);
		PROC_UNLOCK(p);
		return (-2);
	}
	return (0);
}

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder.  This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled.  Skip
	 * all this stuff.
	 */
	if (p->p_numthreads > 1) {
		/*
		 * Unlink this thread from its proc and the kseg.
		 * In keeping with the other structs we probably should
		 * have a thread_unlink() that does some of this but it
		 * would only be called from here (I think) so it would
		 * be a waste. (might be useful for proc_fini() as well.)
		 */
		TAILQ_REMOVE(&p->p_threads, td, td_plist);
		p->p_numthreads--;
		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
		kg->kg_numthreads--;
		if (p->p_maxthrwaits)
			wakeup(&p->p_numthreads);
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread.  P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}

		/*
		 * Because each upcall structure has an owner thread,
		 * and the owner thread exits only when the process is
		 * exiting, the upcall to userland is no longer needed
		 * and deleting the upcall structure is safe here.
		 * So when all threads in a group have exited, all upcalls
		 * in the group are automatically freed.
		 */
		if (td->td_upcall)
			upcall_remove(td);

		ke->ke_state = KES_UNQUEUED;
		ke->ke_thread = NULL;
		/*
		 * Decide what to do with the KSE attached to this thread.
		 */
		if (ke->ke_flags & KEF_EXIT)
			kse_unlink(ke);
		else
			kse_reassign(ke);
		PROC_UNLOCK(p);
		td->td_kse = NULL;
		td->td_state = TDS_INACTIVE;
		td->td_proc = NULL;
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		PCPU_SET(deadthread, td);
	} else {
		PROC_UNLOCK(p);
	}
	cpu_throw();
	/* NOTREACHED */
}

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant held; proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
	}
	thread_reap();	/* check for zombie threads etc. */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;
	td->td_flags = 0;
	td->td_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

/*
 * Purge a ksegrp resource. When a ksegrp is preparing to
 * exit, it calls this function.
 */
void
kse_purge_group(struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	kg = td->td_ksegrp;
	KASSERT(kg->kg_numthreads == 1, ("%s: bad thread number", __func__));
	while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
		KASSERT(ke->ke_state == KES_IDLE,
		    ("%s: wrong idle KSE state", __func__));
		kse_unlink(ke);
	}
	KASSERT((kg->kg_kses == 1),
	    ("%s: ksegrp still has %d KSEs", __func__, kg->kg_kses));
	KASSERT((kg->kg_numupcalls == 0),
	    ("%s: ksegrp still has %d upcall datas",
	    __func__, kg->kg_numupcalls));
}

/*
 * Purge a process's KSE resource. When a process is preparing to
 * exit, it calls kse_purge to release any extra KSE resources in
 * the process.
 */
void
kse_purge(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse *ke;

	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	mtx_lock_spin(&sched_lock);
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		p->p_numksegrps--;
		/*
		 * There is no ownership of a KSE; after all threads
		 * in the group have exited, it is possible that some
		 * KSEs were left on the idle queue, gc them now.
		 */
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			KASSERT(ke->ke_state == KES_IDLE,
			    ("%s: wrong idle KSE state", __func__));
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			kg->kg_idle_kses--;
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
			kg->kg_kses--;
			kse_stash(ke);
		}
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		    ("ksegrp has wrong kg_kses: %d", kg->kg_kses));
		KASSERT((kg->kg_numupcalls == 0),
		    ("%s: ksegrp still has %d upcall datas",
		    __func__, kg->kg_numupcalls));

		if (kg != td->td_ksegrp)
			ksegrp_stash(kg);
	}
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	p->p_numksegrps++;
	mtx_unlock_spin(&sched_lock);
}

/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize thread's large data area outside sched_lock
 * for thread_schedule_upcall().
 */
void
thread_alloc_spare(struct thread *td, struct thread *spare)
{
	if (td->td_standin)
		return;
	if (spare == NULL)
		spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	/* Setup PCB and fork address */
	cpu_set_upcall(spare, td->td_pcb);
	/*
	 * XXXKSE do we really need this? (default values for the
	 * frame).
	 */
	bcopy(td->td_frame, spare->td_frame, sizeof(struct trapframe));
	spare->td_ucred = crhold(td->td_ucred);
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on specified kse_upcall,
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
		return (NULL);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = TDF_UPCALLING;
	if (td->td_proc->p_sflag & PS_NEEDSIGCHK)
		td2->td_flags |= TDF_ASTPENDING;
	td2->td_kse = NULL;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	setrunqueue(td2);
	return (td2);	/* bogus.. should be a void function */
}

/*
 * Add a caught signal to the set reported in the owned upcall's
 * mailbox (km_sigscaught) so that the UTS can see it.
 */
void
thread_signal_add(struct thread *td, int sig)
{
	struct kse_upcall *ku;
	struct proc *p;
	sigset_t ss;
	int error;

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	td = curthread;
	ku = td->td_upcall;
	p = td->td_proc;

	PROC_UNLOCK(p);
	error = copyin(&ku->ku_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	if (error)
		goto error;

	SIGADDSET(ss, sig);

	error = copyout(&ss, &ku->ku_mailbox->km_sigscaught, sizeof(sigset_t));
	if (error)
		goto error;

	PROC_LOCK(p);
	return;
error:
	PROC_LOCK(p);
	sigexit(td, SIGILL);
}


/*
 * Schedule an upcall to notify a KSE process that it has received signals.
 */
void
thread_signal_upcall(struct thread *td)
{
	mtx_lock_spin(&sched_lock);
	td->td_flags |= TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);

	return;
}

/*
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
 */
void
thread_user_enter(struct proc *p, struct thread *td)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;

	kg = td->td_ksegrp;
	/*
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 * XXX p_singlethread not locked, but should be safe.
	 */
	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		thread_exit();
		/* NOTREACHED */
	}

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in kse_reassign()),
	 * but for now do it every time.
	 */
	kg = td->td_ksegrp;
	if (kg->kg_numupcalls) {
		ku = td->td_upcall;
		KASSERT(ku, ("%s: no upcall owned", __func__));
		KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
		td->td_mailbox =
		    (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if ((td->td_mailbox == NULL) ||
		    (td->td_mailbox == (void *)-1)) {
			/* Don't schedule upcall when blocked */
			td->td_mailbox = NULL;
			mtx_lock_spin(&sched_lock);
			td->td_flags &= ~TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		} else {
			if (td->td_standin == NULL)
				thread_alloc_spare(td, NULL);
			mtx_lock_spin(&sched_lock);
			td->td_flags |= TDF_CAN_UNBIND;
			mtx_unlock_spin(&sched_lock);
		}
	}
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish.  Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	int error = 0, upcalls;
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/* Nothing to do with non-threaded group/process */
	if (td->td_ksegrp->kg_numupcalls == 0)
		return (0);

	/*
	 * Stat clock interrupt hit in userland; we are returning
	 * from the interrupt, charge the thread's userland time
	 * for the UTS.
	 */
	if (td->td_flags & TDF_USTATCLOCK) {
		thread_update_usr_ticks(td);
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_USTATCLOCK;
		mtx_unlock_spin(&sched_lock);
	}

	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_CAN_UNBIND;
		mtx_unlock_spin(&sched_lock);
		if ((kg->kg_completed == NULL) &&
		    (td->td_upcall->ku_flags & KUF_DOUPCALL) == 0) {
			thread_update_sys_ticks(td);
			td->td_mailbox = NULL;
			return (0);
		}
		error = thread_export_context(td);
		if (error) {
			/*
			 * Failing to do the KSE operation just defaults
			 * back to synchronous operation, so just return from
			 * the syscall.
			 */
			return (0);
		}
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_UPCALLING;
		mtx_unlock_spin(&sched_lock);
	} else if (td->td_mailbox) {
		error = thread_export_context(td);
		if (error) {
			PROC_LOCK(td->td_proc);
			mtx_lock_spin(&sched_lock);
			/* possibly upcall with error? */
		} else {
			PROC_LOCK(td->td_proc);
			mtx_lock_spin(&sched_lock);
			/*
			 * There are upcall threads waiting for
			 * work to do, wake one of them up.
			 * XXXKSE Maybe wake all of them up.
			 */
			if (kg->kg_upsleeps)
				wakeup_one(&kg->kg_completed);
		}
		thread_exit();
		/* NOTREACHED */
	}

	if (td->td_flags & TDF_UPCALLING) {
		KASSERT(TD_CAN_UNBIND(td) == 0, ("upcall thread can unbind"));
		ku = td->td_upcall;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		/*
		 * Set user context to the UTS.
		 * Will use Giant in cpu_thread_clean() because it uses
		 * kmem_free(kernel_map, ...)
		 */
		cpu_set_upcall_kse(td, ku);

		/*
		 * Clear TDF_UPCALLING after setting the upcall context;
		 * the profiling code looks at TDF_UPCALLING to avoid
		 * accounting a wrong user %EIP.
		 */
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL)
			ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		error = thread_link_mboxes(kg, ku);
		if (error)
			goto out;

		/*
		 * Set state and clear the thread mailbox pointer.
		 * From now on we are just a bound outgoing process.
		 * **Problem** userret is often called several times.
		 * It would be nice if this all happened only on the first
		 * time through (the scan for extra work etc.).
		 */
		error = suword((caddr_t)&ku->ku_mailbox->km_curthread, 0);
		if (error)
			goto out;

		/* Export current system time */
		nanotime(&ts);
		error = copyout(&ts, (caddr_t)&ku->ku_mailbox->km_timeofday,
		    sizeof(ts));
	}

out:
	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		while (p->p_numthreads > max_threads_per_proc) {
			if (P_SHOULDSTOP(p))
				break;
			upcalls = 0;
			mtx_lock_spin(&sched_lock);
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			mtx_unlock_spin(&sched_lock);
			if (upcalls >= max_threads_per_proc)
				break;
			p->p_maxthrwaits++;
			msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", NULL);
			p->p_maxthrwaits--;
		}
		PROC_UNLOCK(p);
	}

	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(td->td_proc);
		psignal(td->td_proc, SIGSEGV);
		PROC_UNLOCK(td->td_proc);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td, NULL);
	}

	/*
	 * Clear thread mailbox first, then clear system tick count.
	 * The order is important because thread_statclock() uses the
	 * mailbox pointer to see if it is a userland thread or
	 * a UTS kernel thread.
	 */
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar).  Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single threaded in the suspend mode when
 * there are no threads in user mode.  Threads in the kernel must be
 * allowed to continue until they get to the user boundary.  They may even
 * copy out their return values and data before suspending.  They may however
 * be accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_KSES) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
	} else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	p->p_singlethread = td;
	/* XXXKSE Which lock protects the below values? */
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					}
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
							cv_abort(td2);
						else
							abortsleep(td2);
					}
				} else {
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 * XXXKSE Is it totally safe to
					 * suspend a non-interruptible thread?
					 */
					if (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED))
						thread_suspend_one(td2);
				}
			}
		}
		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if ((p->p_numthreads - p->p_suspcount) == 1) {
			mtx_unlock_spin(&sched_lock);
			break;
		}

		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_one(td);
		mtx_unlock(&Giant);
		PROC_UNLOCK(p);
		p->p_stats->p_ru.ru_nvcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		PROC_LOCK(p);
	}
	if (force_exit == SINGLE_EXIT) {
		if (td->td_upcall) {
			mtx_lock_spin(&sched_lock);
			upcall_remove(td);
			mtx_unlock_spin(&sched_lock);
		}
		kse_purge(p, td);
	}
	return (0);
}

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non-zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	p = td->td_proc;
	kg = td->td_ksegrp;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			mtx_lock_spin(&sched_lock);
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			thread_exit();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		mtx_lock_spin(&sched_lock);
		if ((p->p_flag & P_STOPPED_SIG) &&
		    (p->p_suspcount+1 == p->p_numthreads)) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p->p_pptr);
			if ((p->p_pptr->p_procsig->ps_flag &
			    PS_NOCLDSTOP) == 0) {
				psignal(p->p_pptr, SIGCHLD);
			}
			PROC_UNLOCK(p->p_pptr);
			mtx_lock_spin(&sched_lock);
		}
		mtx_assert(&Giant, MA_NOTOWNED);
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		mtx_lock_spin(&sched_lock);
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
		mtx_unlock_spin(&sched_lock);
	}
}